From 33c446dadd265276ec3f12b8e124ad2eff4eae24 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 5 Feb 2024 16:29:45 +0100 Subject: [PATCH 001/286] Refactor library to artifact matching to not use pointers (#1172) ## Changes The approach to do this was: 1. Iterate over all libraries in all job tasks 2. Find references to local libraries 3. Store pointer to `compute.Library` in the matching artifact file to signal it should be uploaded This breaks down when introducing #1098 because we can no longer track unexported state across mutators. The approach in this PR performs the path matching twice; once in the matching mutator where we check if each referenced file has an artifacts section, and once during artifact upload to rewrite the library path from a local file reference to an absolute Databricks path. ## Tests Integration tests pass. --- bundle/artifacts/artifacts.go | 45 ++++++-- bundle/config/artifact.go | 40 +------ bundle/libraries/helpers.go | 16 +++ bundle/libraries/helpers_test.go | 17 +++ bundle/libraries/libraries.go | 142 ++++++------------------ bundle/libraries/libraries_test.go | 99 +++++++++++++---- bundle/libraries/local_path.go | 63 +++++++++++ bundle/libraries/local_path_test.go | 43 +++++++ bundle/libraries/match.go | 45 ++++++++ bundle/libraries/match_test.go | 1 + bundle/libraries/testdata/library1 | 0 bundle/libraries/testdata/library2 | 0 bundle/libraries/workspace_path.go | 38 +++++++ bundle/libraries/workspace_path_test.go | 33 ++++++ bundle/tests/bundle/wheel_test.go | 1 - internal/bundle/artifacts_test.go | 39 +++++-- 16 files changed, 431 insertions(+), 191 deletions(-) create mode 100644 bundle/libraries/helpers.go create mode 100644 bundle/libraries/helpers_test.go create mode 100644 bundle/libraries/local_path.go create mode 100644 bundle/libraries/local_path_test.go create mode 100644 bundle/libraries/match.go create mode 100644 bundle/libraries/match_test.go create mode 100644 bundle/libraries/testdata/library1 create mode 100644 bundle/libraries/testdata/library2 create mode 100644 bundle/libraries/workspace_path.go create mode 100644 bundle/libraries/workspace_path_test.go diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index 76d29f56c..e474240de 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" @@ -106,7 +107,7 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - err = uploadArtifact(ctx, artifact, uploadPath, client) + err = uploadArtifact(ctx, b, artifact, uploadPath, client) if err != nil { return fmt.Errorf("upload for %s failed, error: %w", m.name, err) } @@ -114,23 +115,45 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error { return nil } -func uploadArtifact(ctx context.Context, a *config.Artifact, uploadPath string, client filer.Filer) error { +func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error { + filesToLibraries := libraries.MapFilesToTaskLibraries(ctx, b) + for i := range a.Files { f := &a.Files[i] - if f.NeedsUpload() { - filename := filepath.Base(f.Source) - cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename)) - err := 
uploadArtifactFile(ctx, f.Source, client) - if err != nil { - return err + // Lookup all tasks that reference this file. + libs, ok := filesToLibraries[f.Source] + if !ok { + log.Debugf(ctx, "No tasks reference %s. Skipping upload.", f.Source) + continue + } + + filename := filepath.Base(f.Source) + cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename)) + + err := uploadArtifactFile(ctx, f.Source, client) + if err != nil { + return err + } + + log.Infof(ctx, "Upload succeeded") + f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) + + // Update all tasks that reference this file. + for _, lib := range libs { + wsfsBase := "/Workspace" + remotePath := path.Join(wsfsBase, f.RemotePath) + if lib.Whl != "" { + lib.Whl = remotePath + continue + } + if lib.Jar != "" { + lib.Jar = remotePath + continue } - log.Infof(ctx, "Upload succeeded") - f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) } } - a.NormalisePaths() return nil } diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index 279a8f3b7..dbf327fa0 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -3,11 +3,9 @@ package config import ( "context" "fmt" - "path" "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/exec" - "github.com/databricks/databricks-sdk-go/service/compute" ) type Artifacts map[string]*Artifact @@ -23,9 +21,8 @@ type ArtifactType string const ArtifactPythonWheel ArtifactType = `whl` type ArtifactFile struct { - Source string `json:"source"` - RemotePath string `json:"-" bundle:"readonly"` - Libraries []*compute.Library `json:"-" bundle:"readonly"` + Source string `json:"source"` + RemotePath string `json:"remote_path" bundle:"readonly"` } // Artifact defines a single local code artifact that can be @@ -65,36 +62,3 @@ func (a *Artifact) Build(ctx context.Context) ([]byte, error) { } return e.Exec(ctx, a.BuildCommand) } - -func (a *Artifact) NormalisePaths() { - for _, f := range a.Files { - // If no libraries attached, nothing to normalise, skipping - if f.Libraries == nil { - continue - } - - wsfsBase := "/Workspace" - remotePath := path.Join(wsfsBase, f.RemotePath) - for i := range f.Libraries { - lib := f.Libraries[i] - if lib.Whl != "" { - lib.Whl = remotePath - continue - } - if lib.Jar != "" { - lib.Jar = remotePath - continue - } - } - - } -} - -// This function determines if artifact files needs to be uploaded. -// During the bundle processing we analyse which library uses which artifact file. -// If artifact file is used as a library, we store the reference to this library in artifact file Libraries field. -// If artifact file has libraries it's been used in, it means than we need to upload this file. 
-// Otherwise this artifact file is not used and we skip uploading -func (af *ArtifactFile) NeedsUpload() bool { - return af.Libraries != nil -} diff --git a/bundle/libraries/helpers.go b/bundle/libraries/helpers.go new file mode 100644 index 000000000..89679c91a --- /dev/null +++ b/bundle/libraries/helpers.go @@ -0,0 +1,16 @@ +package libraries + +import "github.com/databricks/databricks-sdk-go/service/compute" + +func libraryPath(library *compute.Library) string { + if library.Whl != "" { + return library.Whl + } + if library.Jar != "" { + return library.Jar + } + if library.Egg != "" { + return library.Egg + } + return "" +} diff --git a/bundle/libraries/helpers_test.go b/bundle/libraries/helpers_test.go new file mode 100644 index 000000000..adc20a246 --- /dev/null +++ b/bundle/libraries/helpers_test.go @@ -0,0 +1,17 @@ +package libraries + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" +) + +func TestLibraryPath(t *testing.T) { + path := "/some/path" + + assert.Equal(t, path, libraryPath(&compute.Library{Whl: path})) + assert.Equal(t, path, libraryPath(&compute.Library{Jar: path})) + assert.Equal(t, path, libraryPath(&compute.Library{Egg: path})) + assert.Equal(t, "", libraryPath(&compute.Library{})) +} diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 548d5ef1b..e0cb3fa38 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -3,46 +3,16 @@ package libraries import ( "context" "fmt" - "net/url" - "path" "path/filepath" - "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" ) -type match struct { -} - -func MatchWithArtifacts() bundle.Mutator { - return &match{} -} - -func (a *match) Name() string { - return "libraries.MatchWithArtifacts" -} - -func (a *match) Apply(ctx context.Context, b *bundle.Bundle) error { - tasks := findAllTasks(b) - for _, task := range tasks { - if isMissingRequiredLibraries(task) { - return fmt.Errorf("task '%s' is missing required libraries. 
Please include your package code in task libraries block", task.TaskKey) - } - for j := range task.Libraries { - lib := &task.Libraries[j] - err := findArtifactsAndMarkForUpload(ctx, lib, b) - if err != nil { - return err - } - } - } - return nil -} - func findAllTasks(b *bundle.Bundle) []*jobs.Task { r := b.Config.Resources result := make([]*jobs.Task, 0) @@ -71,7 +41,7 @@ func FindAllWheelTasksWithLocalLibraries(b *bundle.Bundle) []*jobs.Task { func IsTaskWithLocalLibraries(task *jobs.Task) bool { for _, l := range task.Libraries { - if isLocalLibrary(&l) { + if IsLocalLibrary(&l) { return true } } @@ -81,8 +51,7 @@ func IsTaskWithLocalLibraries(task *jobs.Task) bool { func IsTaskWithWorkspaceLibraries(task *jobs.Task) bool { for _, l := range task.Libraries { - path := libPath(&l) - if isWorkspacePath(path) { + if IsWorkspaceLibrary(&l) { return true } } @@ -90,16 +59,8 @@ func IsTaskWithWorkspaceLibraries(task *jobs.Task) bool { return false } -func isMissingRequiredLibraries(task *jobs.Task) bool { - if task.Libraries != nil { - return false - } - - return task.PythonWheelTask != nil || task.SparkJarTask != nil -} - func findLibraryMatches(lib *compute.Library, b *bundle.Bundle) ([]string, error) { - path := libPath(lib) + path := libraryPath(lib) if path == "" { return nil, nil } @@ -108,26 +69,27 @@ func findLibraryMatches(lib *compute.Library, b *bundle.Bundle) ([]string, error return filepath.Glob(fullPath) } -func findArtifactsAndMarkForUpload(ctx context.Context, lib *compute.Library, b *bundle.Bundle) error { +func findArtifactFiles(ctx context.Context, lib *compute.Library, b *bundle.Bundle) ([]*config.ArtifactFile, error) { matches, err := findLibraryMatches(lib, b) if err != nil { - return err + return nil, err } - if len(matches) == 0 && isLocalLibrary(lib) { - return fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libPath(lib)) + if len(matches) == 0 && IsLocalLibrary(lib) { + return nil, fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libraryPath(lib)) } + var out []*config.ArtifactFile for _, match := range matches { af, err := findArtifactFileByLocalPath(match, b) if err != nil { cmdio.LogString(ctx, fmt.Sprintf("%s. Skipping uploading. 
In order to use the define 'artifacts' section", err.Error())) } else { - af.Libraries = append(af.Libraries, lib) + out = append(out, af) } } - return nil + return out, nil } func findArtifactFileByLocalPath(path string, b *bundle.Bundle) (*config.ArtifactFile, error) { @@ -142,67 +104,27 @@ func findArtifactFileByLocalPath(path string, b *bundle.Bundle) (*config.Artifac return nil, fmt.Errorf("artifact section is not defined for file at %s", path) } -func libPath(library *compute.Library) string { - if library.Whl != "" { - return library.Whl - } - if library.Jar != "" { - return library.Jar - } - if library.Egg != "" { - return library.Egg +func MapFilesToTaskLibraries(ctx context.Context, b *bundle.Bundle) map[string][]*compute.Library { + tasks := findAllTasks(b) + out := make(map[string][]*compute.Library) + for _, task := range tasks { + for j := range task.Libraries { + lib := &task.Libraries[j] + if !IsLocalLibrary(lib) { + continue + } + + matches, err := findLibraryMatches(lib, b) + if err != nil { + log.Warnf(ctx, "Error matching library to files: %s", err.Error()) + continue + } + + for _, match := range matches { + out[match] = append(out[match], lib) + } + } } - return "" -} - -func isLocalLibrary(library *compute.Library) bool { - path := libPath(library) - if path == "" { - return false - } - - return IsLocalPath(path) -} - -func IsLocalPath(path string) bool { - if isExplicitFileScheme(path) { - return true - } - - if isRemoteStorageScheme(path) { - return false - } - - return !isAbsoluteRemotePath(path) -} - -func isExplicitFileScheme(path string) bool { - return strings.HasPrefix(path, "file://") -} - -func isRemoteStorageScheme(path string) bool { - url, err := url.Parse(path) - if err != nil { - return false - } - - if url.Scheme == "" { - return false - } - - // If the path starts with scheme:/ format, it's a correct remote storage scheme - return strings.HasPrefix(path, url.Scheme+":/") - -} - -func isWorkspacePath(path string) bool { - return strings.HasPrefix(path, "/Workspace/") || - strings.HasPrefix(path, "/Users/") || - strings.HasPrefix(path, "/Shared/") -} - -func isAbsoluteRemotePath(p string) bool { - // If path for library starts with /, it's a remote absolute path - return path.IsAbs(p) + return out } diff --git a/bundle/libraries/libraries_test.go b/bundle/libraries/libraries_test.go index 41609bd4e..0bec2c6d0 100644 --- a/bundle/libraries/libraries_test.go +++ b/bundle/libraries/libraries_test.go @@ -1,31 +1,88 @@ package libraries import ( - "fmt" + "context" + "path/filepath" "testing" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/stretchr/testify/require" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" ) -var testCases map[string]bool = map[string]bool{ - "./some/local/path": true, - "/some/full/path": false, - "/Workspace/path/to/package": false, - "/Users/path/to/package": false, - "file://path/to/package": true, - "C:\\path\\to\\package": true, - "dbfs://path/to/package": false, - "dbfs:/path/to/package": false, - "s3://path/to/package": false, - "abfss://path/to/package": false, -} - -func TestIsLocalLbrary(t *testing.T) { - for p, result := range testCases { - lib := compute.Library{ - Whl: p, - } - require.Equal(t, result, isLocalLibrary(&lib), fmt.Sprintf("isLocalLibrary must return %t for path %s ", result, p)) +func 
TestMapFilesToTaskLibrariesNoGlob(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Path: "testdata", + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "library1", + }, + { + Whl: "library2", + }, + { + Whl: "/absolute/path/in/workspace/library3", + }, + }, + }, + { + Libraries: []compute.Library{ + { + Whl: "library1", + }, + { + Whl: "library2", + }, + }, + }, + }, + }, + }, + "job2": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "library1", + }, + { + Whl: "library2", + }, + }, + }, + }, + }, + }, + }, + }, + }, } + + out := MapFilesToTaskLibraries(context.Background(), b) + assert.Len(t, out, 2) + + // Pointer equality for "library1" + assert.Equal(t, []*compute.Library{ + &b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0], + &b.Config.Resources.Jobs["job1"].JobSettings.Tasks[1].Libraries[0], + &b.Config.Resources.Jobs["job2"].JobSettings.Tasks[0].Libraries[0], + }, out[filepath.Clean("testdata/library1")]) + + // Pointer equality for "library2" + assert.Equal(t, []*compute.Library{ + &b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[1], + &b.Config.Resources.Jobs["job1"].JobSettings.Tasks[1].Libraries[1], + &b.Config.Resources.Jobs["job2"].JobSettings.Tasks[0].Libraries[1], + }, out[filepath.Clean("testdata/library2")]) } diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go new file mode 100644 index 000000000..a5c0cc969 --- /dev/null +++ b/bundle/libraries/local_path.go @@ -0,0 +1,63 @@ +package libraries + +import ( + "net/url" + "path" + "strings" + + "github.com/databricks/databricks-sdk-go/service/compute" +) + +// IsLocalPath returns true if the specified path indicates that +// it should be interpreted as a path on the local file system. +// +// The following paths are considered local: +// +// - myfile.txt +// - ./myfile.txt +// - ../myfile.txt +// - file:///foo/bar/myfile.txt +// +// The following paths are considered remote: +// +// - dbfs:/mnt/myfile.txt +// - s3:/mybucket/myfile.txt +// - /Users/jane@doe.com/myfile.txt +func IsLocalPath(p string) bool { + // If the path has the explicit file scheme, it's a local path. + if strings.HasPrefix(p, "file://") { + return true + } + + // If the path has another scheme, it's a remote path. + if isRemoteStorageScheme(p) { + return false + } + + // If path starts with /, it's a remote absolute path + return !path.IsAbs(p) +} + +func isRemoteStorageScheme(path string) bool { + url, err := url.Parse(path) + if err != nil { + return false + } + + if url.Scheme == "" { + return false + } + + // If the path starts with scheme:/ format, it's a correct remote storage scheme + return strings.HasPrefix(path, url.Scheme+":/") +} + +// IsLocalLibrary returns true if the specified library refers to a local path. 
+func IsLocalLibrary(library *compute.Library) bool { + path := libraryPath(library) + if path == "" { + return false + } + + return IsLocalPath(path) +} diff --git a/bundle/libraries/local_path_test.go b/bundle/libraries/local_path_test.go new file mode 100644 index 000000000..640afa85b --- /dev/null +++ b/bundle/libraries/local_path_test.go @@ -0,0 +1,43 @@ +package libraries + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" +) + +func TestIsLocalPath(t *testing.T) { + // Relative paths, paths with the file scheme, and Windows paths. + assert.True(t, IsLocalPath("./some/local/path")) + assert.True(t, IsLocalPath("file://path/to/package")) + assert.True(t, IsLocalPath("C:\\path\\to\\package")) + assert.True(t, IsLocalPath("myfile.txt")) + assert.True(t, IsLocalPath("./myfile.txt")) + assert.True(t, IsLocalPath("../myfile.txt")) + assert.True(t, IsLocalPath("file:///foo/bar/myfile.txt")) + + // Absolute paths. + assert.False(t, IsLocalPath("/some/full/path")) + assert.False(t, IsLocalPath("/Workspace/path/to/package")) + assert.False(t, IsLocalPath("/Users/path/to/package")) + + // Paths with schemes. + assert.False(t, IsLocalPath("dbfs://path/to/package")) + assert.False(t, IsLocalPath("dbfs:/path/to/package")) + assert.False(t, IsLocalPath("s3://path/to/package")) + assert.False(t, IsLocalPath("abfss://path/to/package")) +} + +func TestIsLocalLibrary(t *testing.T) { + // Local paths. + assert.True(t, IsLocalLibrary(&compute.Library{Whl: "./file.whl"})) + assert.True(t, IsLocalLibrary(&compute.Library{Jar: "../target/some.jar"})) + + // Non-local paths. + assert.False(t, IsLocalLibrary(&compute.Library{Whl: "/Workspace/path/to/file.whl"})) + assert.False(t, IsLocalLibrary(&compute.Library{Jar: "s3:/bucket/path/some.jar"})) + + // Empty. + assert.False(t, IsLocalLibrary(&compute.Library{})) +} diff --git a/bundle/libraries/match.go b/bundle/libraries/match.go new file mode 100644 index 000000000..c8fd2baec --- /dev/null +++ b/bundle/libraries/match.go @@ -0,0 +1,45 @@ +package libraries + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +type match struct { +} + +func MatchWithArtifacts() bundle.Mutator { + return &match{} +} + +func (a *match) Name() string { + return "libraries.MatchWithArtifacts" +} + +func (a *match) Apply(ctx context.Context, b *bundle.Bundle) error { + tasks := findAllTasks(b) + for _, task := range tasks { + if isMissingRequiredLibraries(task) { + return fmt.Errorf("task '%s' is missing required libraries. 
Please include your package code in task libraries block", task.TaskKey) + } + for j := range task.Libraries { + lib := &task.Libraries[j] + _, err := findArtifactFiles(ctx, lib, b) + if err != nil { + return err + } + } + } + return nil +} + +func isMissingRequiredLibraries(task *jobs.Task) bool { + if task.Libraries != nil { + return false + } + + return task.PythonWheelTask != nil || task.SparkJarTask != nil +} diff --git a/bundle/libraries/match_test.go b/bundle/libraries/match_test.go new file mode 100644 index 000000000..828c65640 --- /dev/null +++ b/bundle/libraries/match_test.go @@ -0,0 +1 @@ +package libraries diff --git a/bundle/libraries/testdata/library1 b/bundle/libraries/testdata/library1 new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/libraries/testdata/library2 b/bundle/libraries/testdata/library2 new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/libraries/workspace_path.go b/bundle/libraries/workspace_path.go new file mode 100644 index 000000000..b08ca1616 --- /dev/null +++ b/bundle/libraries/workspace_path.go @@ -0,0 +1,38 @@ +package libraries + +import ( + "strings" + + "github.com/databricks/databricks-sdk-go/service/compute" +) + +// IsWorkspacePath returns true if the specified path indicates that +// it should be interpreted as a Databricks Workspace path. +// +// The following paths are considered workspace paths: +// +// - /Workspace/Users/jane@doe.com/myfile +// - /Users/jane@doe.com/myfile +// - /Shared/project/myfile +// +// The following paths are not considered workspace paths: +// +// - myfile.txt +// - ./myfile.txt +// - ../myfile.txt +// - /foo/bar/myfile.txt +func IsWorkspacePath(path string) bool { + return strings.HasPrefix(path, "/Workspace/") || + strings.HasPrefix(path, "/Users/") || + strings.HasPrefix(path, "/Shared/") +} + +// IsWorkspaceLibrary returns true if the specified library refers to a workspace path. +func IsWorkspaceLibrary(library *compute.Library) bool { + path := libraryPath(library) + if path == "" { + return false + } + + return IsWorkspacePath(path) +} diff --git a/bundle/libraries/workspace_path_test.go b/bundle/libraries/workspace_path_test.go new file mode 100644 index 000000000..feaaab7f7 --- /dev/null +++ b/bundle/libraries/workspace_path_test.go @@ -0,0 +1,33 @@ +package libraries + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" +) + +func TestIsWorkspacePath(t *testing.T) { + // Absolute paths with particular prefixes. + assert.True(t, IsWorkspacePath("/Workspace/path/to/package")) + assert.True(t, IsWorkspacePath("/Users/path/to/package")) + assert.True(t, IsWorkspacePath("/Shared/path/to/package")) + + // Relative paths. + assert.False(t, IsWorkspacePath("myfile.txt")) + assert.False(t, IsWorkspacePath("./myfile.txt")) + assert.False(t, IsWorkspacePath("../myfile.txt")) +} + +func TestIsWorkspaceLibrary(t *testing.T) { + // Workspace paths. + assert.True(t, IsWorkspaceLibrary(&compute.Library{Whl: "/Workspace/path/to/file.whl"})) + + // Non-workspace paths. + assert.False(t, IsWorkspaceLibrary(&compute.Library{Whl: "./file.whl"})) + assert.False(t, IsWorkspaceLibrary(&compute.Library{Jar: "../target/some.jar"})) + assert.False(t, IsWorkspaceLibrary(&compute.Library{Jar: "s3:/bucket/path/some.jar"})) + + // Empty. 
+ assert.False(t, IsWorkspaceLibrary(&compute.Library{})) +} diff --git a/bundle/tests/bundle/wheel_test.go b/bundle/tests/bundle/wheel_test.go index 57ecb54b9..5171241f4 100644 --- a/bundle/tests/bundle/wheel_test.go +++ b/bundle/tests/bundle/wheel_test.go @@ -84,5 +84,4 @@ func TestBundlePythonWheelBuildNoBuildJustUpload(t *testing.T) { "package", "my_test_code-0.0.1-py3-none-any.whl", )) - require.True(t, artifact.Files[0].NeedsUpload()) } diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 71f91fded..549b393d2 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -1,7 +1,6 @@ package bundle import ( - "context" "os" "path" "path/filepath" @@ -11,9 +10,11 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/internal" - "github.com/databricks/databricks-sdk-go" + "github.com/databricks/cli/internal/acc" "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) @@ -26,8 +27,8 @@ func touchEmptyFile(t *testing.T, path string) { } func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { - t.Log(internal.GetEnvOrSkipTest(t, "CLOUD_ENV")) - + ctx, wt := acc.WorkspaceTest(t) + w := wt.W dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) @@ -37,14 +38,10 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { Files: []config.ArtifactFile{ { Source: whlPath, - Libraries: []*compute.Library{ - {Whl: "dist\\test.whl"}, - }, }, }, } - w := databricks.Must(databricks.NewWorkspaceClient()) wsDir := internal.TemporaryWorkspaceDir(t, w) b := &bundle.Bundle{ @@ -59,11 +56,33 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { Artifacts: config.Artifacts{ "test": artifact, }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "dist/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, }, } - err := bundle.Apply(context.Background(), b, artifacts.BasicUpload("test")) + err := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) require.NoError(t, err) + + // The remote path attribute on the artifact file should have been set. require.Regexp(t, regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), artifact.Files[0].RemotePath) - require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), artifact.Files[0].Libraries[0].Whl) + + // The task library path should have been updated to the remote path. + lib := b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0] + require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), lib.Whl) } From 20e45b87aef99ea092333df61cf6ea77d610f408 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 5 Feb 2024 17:54:41 +0100 Subject: [PATCH 002/286] Harden `dyn.Value` equality check (#1173) ## Changes This function could panic when either side of the comparison is a nil or empty slice. This logic is triggered when comparing the input value to the output value when calling `dyn.Map`. ## Tests Unit tests. 
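A minimal sketch of the scenario the fix guards against, modeled on the new unit tests in this patch (the wrapper test name and package layout below are illustrative, not part of the change):

```go
package dyn_test

import (
	"testing"

	"github.com/databricks/cli/libs/dyn"
)

// Mapping over a value that contains an empty sequence exercises the equality
// check between the input and output values. Before this change, that check
// took the address of element 0 of an empty slice and could panic.
func TestMapWithEmptySequenceSketch(t *testing.T) {
	vin := dyn.V(map[string]dyn.Value{
		"key": dyn.V([]dyn.Value{}), // empty sequence
	})
	_, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("key")), func(v dyn.Value) (dyn.Value, error) {
		// Return another empty sequence; the result is compared to the input.
		return dyn.V([]dyn.Value{}), nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
```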
--- libs/dyn/value.go | 17 +++++++++++--- libs/dyn/visit_map_test.go | 46 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 3 deletions(-) diff --git a/libs/dyn/value.go b/libs/dyn/value.go index a487e13e1..e9c22bfbe 100644 --- a/libs/dyn/value.go +++ b/libs/dyn/value.go @@ -150,11 +150,22 @@ func (v Value) eq(w Value) bool { // This is safe because we don't allow maps to be mutated. return &v.v == &w.v case KindSequence: - // Compare pointers to the underlying slice and slice length. - // This is safe because we don't allow slices to be mutated. vs := v.v.([]Value) ws := w.v.([]Value) - return &vs[0] == &ws[0] && len(vs) == len(ws) + lv := len(vs) + lw := len(ws) + // If both slices are empty, they are equal. + if lv == 0 && lw == 0 { + return true + } + // If they have different lengths, they are not equal. + if lv != lw { + return false + } + // They are both non-empty and have the same length. + // Compare pointers to the underlying slice. + // This is safe because we don't allow slices to be mutated. + return &vs[0] == &ws[0] default: return v.v == w.v } diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index a5af3411f..117d03f0a 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -74,6 +74,29 @@ func TestMapFuncOnMap(t *testing.T) { assert.ErrorIs(t, err, ref) } +func TestMapFuncOnMapWithEmptySequence(t *testing.T) { + variants := []dyn.Value{ + // empty sequence + dyn.V([]dyn.Value{}), + // non-empty sequence + dyn.V([]dyn.Value{dyn.V(42)}), + } + + for i := 0; i < len(variants); i++ { + vin := dyn.V(map[string]dyn.Value{ + "key": variants[i], + }) + + for j := 0; j < len(variants); j++ { + vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("key")), func(v dyn.Value) (dyn.Value, error) { + return variants[j], nil + }) + assert.NoError(t, err) + assert.Equal(t, variants[j], vout.Get("key")) + } + } +} + func TestMapFuncOnSequence(t *testing.T) { vin := dyn.V([]dyn.Value{ dyn.V(42), @@ -115,6 +138,29 @@ func TestMapFuncOnSequence(t *testing.T) { assert.ErrorIs(t, err, ref) } +func TestMapFuncOnSequenceWithEmptySequence(t *testing.T) { + variants := []dyn.Value{ + // empty sequence + dyn.V([]dyn.Value{}), + // non-empty sequence + dyn.V([]dyn.Value{dyn.V(42)}), + } + + for i := 0; i < len(variants); i++ { + vin := dyn.V([]dyn.Value{ + variants[i], + }) + + for j := 0; j < len(variants); j++ { + vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(v dyn.Value) (dyn.Value, error) { + return variants[j], nil + }) + assert.NoError(t, err) + assert.Equal(t, variants[j], vout.Index(0)) + } + } +} + func TestMapForeachOnMap(t *testing.T) { vin := dyn.V(map[string]dyn.Value{ "foo": dyn.V(42), From 4131069a4b8163cbfed1cd641a9cbbc33a7d81ac Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 6 Feb 2024 18:15:08 +0530 Subject: [PATCH 003/286] Filter current user from resource permissions (#1145) ## Changes The databricks terraform provider does not allow changing permission of the current user. Instead, the current identity is implictly set to be the owner of all resources on the platform side. This PR introduces a mutator to filter permissions from the bundle configuration, allowing users to define permissions for their own identities in their bundle config. 
This would allow configurations like, allowing both alice and bob to collaborate on the same DAB: ``` permissions: level: CAN_MANAGE user_name: alice level: CAN_MANAGE user_name: bob ``` ## Tests Unit test and manually --- bundle/permissions/filter.go | 80 +++++++++++++++ bundle/permissions/filter_test.go | 157 ++++++++++++++++++++++++++++++ bundle/phases/initialize.go | 1 + 3 files changed, 238 insertions(+) create mode 100644 bundle/permissions/filter.go create mode 100644 bundle/permissions/filter_test.go diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go new file mode 100644 index 000000000..2916f5fb0 --- /dev/null +++ b/bundle/permissions/filter.go @@ -0,0 +1,80 @@ +package permissions + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" + dync "github.com/databricks/cli/libs/dyn/convert" +) + +type filterCurrentUser struct{} + +// The databricks terraform provider does not allow changing the permissions of +// current user. The current user is implied to be the owner of all deployed resources. +// This mutator removes the current user from the permissions of all resources. +func FilterCurrentUser() bundle.Mutator { + return &filterCurrentUser{} +} + +func (m *filterCurrentUser) Name() string { + return "FilterCurrentUserFromPermissions" +} + +func filter(currentUser string) dyn.WalkValueFunc { + return func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + // Permissions are defined at top level of a resource. We can skip walking + // after a depth of 4. + // [resource_type].[resource_name].[permissions].[array_index] + // Example: pipelines.foo.permissions.0 + if len(p) > 4 { + return v, dyn.ErrSkip + } + + // We can skip walking at a depth of 3 if the key is not "permissions". + // Example: pipelines.foo.libraries + if len(p) == 3 && p[2] != dyn.Key("permissions") { + return v, dyn.ErrSkip + } + + // We want to be at the level of an individual permission to check it's + // user_name and service_principal_name fields. 
+ if len(p) != 4 || p[2] != dyn.Key("permissions") { + return v, nil + } + + // Filter if the user_name matches the current user + userName, ok := v.Get("user_name").AsString() + if ok && userName == currentUser { + return v, dyn.ErrDrop + } + + // Filter if the service_principal_name matches the current user + servicePrincipalName, ok := v.Get("service_principal_name").AsString() + if ok && servicePrincipalName == currentUser { + return v, dyn.ErrDrop + } + + return v, nil + } +} + +func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error { + rv, err := dync.FromTyped(b.Config.Resources, dyn.NilValue) + if err != nil { + return err + } + + currentUser := b.Config.Workspace.CurrentUser.UserName + nv, err := dyn.Walk(rv, filter(currentUser)) + if err != nil { + return err + } + + err = dync.ToTyped(&b.Config.Resources, nv) + if err != nil { + return err + } + + return nil +} diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go new file mode 100644 index 000000000..fa0125696 --- /dev/null +++ b/bundle/permissions/filter_test.go @@ -0,0 +1,157 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" +) + +var alice = resources.Permission{ + Level: CAN_MANAGE, + UserName: "alice@databricks.com", +} + +var bob = resources.Permission{ + Level: CAN_VIEW, + UserName: "bob@databricks.com", +} + +var robot = resources.Permission{ + Level: CAN_RUN, + ServicePrincipalName: "i-Robot", +} + +func testFixture(userName string) *bundle.Bundle { + p := []resources.Permission{ + alice, + bob, + robot, + } + + return &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + CurrentUser: &config.User{ + User: &iam.User{ + UserName: userName, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + Permissions: p, + }, + "job2": { + Permissions: p, + }, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline1": { + Permissions: p, + }, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment1": { + Permissions: p, + }, + }, + Models: map[string]*resources.MlflowModel{ + "model1": { + Permissions: p, + }, + }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "endpoint1": { + Permissions: p, + }, + }, + RegisteredModels: map[string]*resources.RegisteredModel{ + "registered_model1": { + Grants: []resources.Grant{ + { + Principal: "abc", + }, + }, + }, + }, + }, + }, + } + +} + +func TestFilterCurrentUser(t *testing.T) { + b := testFixture("alice@databricks.com") + + err := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, err) + + // Assert current user is filtered out. 
+ assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) + + // Assert there's no change to the grant. + assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) +} + +func TestFilterCurrentServicePrincipal(t *testing.T) { + b := testFixture("i-Robot") + + err := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, err) + + // Assert current user is filtered out. + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) + + // Assert there's no change to the grant. 
+ assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) +} diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index e0558d937..bf20ff33a 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -39,6 +39,7 @@ func Initialize() bundle.Mutator { mutator.TranslatePaths(), python.WrapperWarning(), permissions.ApplyBundlePermissions(), + permissions.FilterCurrentUser(), metadata.AnnotateJobs(), terraform.Initialize(), scripts.Execute(config.ScriptPostInit), From 2bbb644749f919f9b2a4b97972505a0ae4bfd21c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 6 Feb 2024 15:51:02 +0100 Subject: [PATCH 004/286] Group bundle run flags by job and pipeline types (#1174) ## Changes Group bundle run flags by job and pipeline types ## Tests ``` Run a resource (e.g. a job or a pipeline) Usage: databricks bundle run [flags] KEY Job Flags: --dbt-commands strings A list of commands to execute for jobs with DBT tasks. --jar-params strings A list of parameters for jobs with Spark JAR tasks. --notebook-params stringToString A map from keys to values for jobs with notebook tasks. (default []) --params stringToString comma separated k=v pairs for job parameters (default []) --pipeline-params stringToString A map from keys to values for jobs with pipeline tasks. (default []) --python-named-params stringToString A map from keys to values for jobs with Python wheel tasks. (default []) --python-params strings A list of parameters for jobs with Python tasks. --spark-submit-params strings A list of parameters for jobs with Spark submit tasks. --sql-params stringToString A map from keys to values for jobs with SQL tasks. (default []) Pipeline Flags: --full-refresh strings List of tables to reset and recompute. --full-refresh-all Perform a full graph reset and recompute. --refresh strings List of tables to update. --refresh-all Perform a full graph update. Flags: -h, --help help for run --no-wait Don't wait for the run to complete. Global Flags: --debug enable debug logging -o, --output type output type: text or json (default text) -p, --profile string ~/.databrickscfg profile -t, --target string bundle target to use (if applicable) --var strings set values for variables defined in bundle config. Example: --var="foo=bar" ``` --- bundle/run/job_options.go | 10 ++-- bundle/run/job_options_test.go | 3 +- bundle/run/options.go | 18 ++++++-- cmd/bundle/run.go | 2 +- libs/cmdgroup/command.go | 83 ++++++++++++++++++++++++++++++++++ libs/cmdgroup/command_test.go | 51 +++++++++++++++++++++ libs/cmdgroup/template.go | 14 ++++++ 7 files changed, 170 insertions(+), 11 deletions(-) create mode 100644 libs/cmdgroup/command.go create mode 100644 libs/cmdgroup/command_test.go create mode 100644 libs/cmdgroup/template.go diff --git a/bundle/run/job_options.go b/bundle/run/job_options.go index 209591d76..c359e79eb 100644 --- a/bundle/run/job_options.go +++ b/bundle/run/job_options.go @@ -27,8 +27,11 @@ type JobOptions struct { jobParams map[string]string } -func (o *JobOptions) Define(fs *flag.FlagSet) { - // Define task parameters flags. 
+func (o *JobOptions) DefineJobOptions(fs *flag.FlagSet) { + fs.StringToStringVar(&o.jobParams, "params", nil, "comma separated k=v pairs for job parameters") +} + +func (o *JobOptions) DefineTaskOptions(fs *flag.FlagSet) { fs.StringSliceVar(&o.dbtCommands, "dbt-commands", nil, "A list of commands to execute for jobs with DBT tasks.") fs.StringSliceVar(&o.jarParams, "jar-params", nil, "A list of parameters for jobs with Spark JAR tasks.") fs.StringToStringVar(&o.notebookParams, "notebook-params", nil, "A map from keys to values for jobs with notebook tasks.") @@ -37,9 +40,6 @@ func (o *JobOptions) Define(fs *flag.FlagSet) { fs.StringSliceVar(&o.pythonParams, "python-params", nil, "A list of parameters for jobs with Python tasks.") fs.StringSliceVar(&o.sparkSubmitParams, "spark-submit-params", nil, "A list of parameters for jobs with Spark submit tasks.") fs.StringToStringVar(&o.sqlParams, "sql-params", nil, "A map from keys to values for jobs with SQL tasks.") - - // Define job parameters flag. - fs.StringToStringVar(&o.jobParams, "params", nil, "comma separated k=v pairs for job parameters") } func (o *JobOptions) hasTaskParametersConfigured() bool { diff --git a/bundle/run/job_options_test.go b/bundle/run/job_options_test.go index 822771d8e..08e18d95d 100644 --- a/bundle/run/job_options_test.go +++ b/bundle/run/job_options_test.go @@ -13,7 +13,8 @@ import ( func setupJobOptions(t *testing.T) (*flag.FlagSet, *JobOptions) { var fs flag.FlagSet var opts JobOptions - opts.Define(&fs) + opts.DefineJobOptions(&fs) + opts.DefineTaskOptions(&fs) return &fs, &opts } diff --git a/bundle/run/options.go b/bundle/run/options.go index 3194fb328..580612d0e 100644 --- a/bundle/run/options.go +++ b/bundle/run/options.go @@ -1,7 +1,8 @@ package run import ( - flag "github.com/spf13/pflag" + "github.com/databricks/cli/libs/cmdgroup" + "github.com/spf13/cobra" ) type Options struct { @@ -10,7 +11,16 @@ type Options struct { NoWait bool } -func (o *Options) Define(fs *flag.FlagSet) { - o.Job.Define(fs) - o.Pipeline.Define(fs) +func (o *Options) Define(cmd *cobra.Command) { + wrappedCmd := cmdgroup.NewCommandWithGroupFlag(cmd) + jobGroup := wrappedCmd.AddFlagGroup("Job") + o.Job.DefineJobOptions(jobGroup.FlagSet()) + + jobTaskGroup := wrappedCmd.AddFlagGroup("Job Task") + jobTaskGroup.SetDescription(`Note: please prefer use of job-level parameters (--param) over task-level parameters. 
+ For more information, see https://docs.databricks.com/en/workflows/jobs/create-run-jobs.html#pass-parameters-to-a-databricks-job-task`) + o.Job.DefineTaskOptions(jobTaskGroup.FlagSet()) + + pipelineGroup := wrappedCmd.AddFlagGroup("Pipeline") + o.Pipeline.Define(pipelineGroup.FlagSet()) } diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index c9e35aa3b..a4b106588 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -24,7 +24,7 @@ func newRunCommand() *cobra.Command { } var runOptions run.Options - runOptions.Define(cmd.Flags()) + runOptions.Define(cmd) var noWait bool cmd.Flags().BoolVar(&noWait, "no-wait", false, "Don't wait for the run to complete.") diff --git a/libs/cmdgroup/command.go b/libs/cmdgroup/command.go new file mode 100644 index 000000000..19c9af16a --- /dev/null +++ b/libs/cmdgroup/command.go @@ -0,0 +1,83 @@ +package cmdgroup + +import ( + "io" + "strings" + "text/template" + "unicode" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type CommandWithGroupFlag struct { + cmd *cobra.Command + flagGroups []*FlagGroup +} + +func (c *CommandWithGroupFlag) Command() *cobra.Command { + return c.cmd +} + +func (c *CommandWithGroupFlag) FlagGroups() []*FlagGroup { + return c.flagGroups +} + +func NewCommandWithGroupFlag(cmd *cobra.Command) *CommandWithGroupFlag { + cmdWithFlagGroups := &CommandWithGroupFlag{cmd: cmd, flagGroups: make([]*FlagGroup, 0)} + cmd.SetUsageFunc(func(c *cobra.Command) error { + err := tmpl(c.OutOrStderr(), c.UsageTemplate(), cmdWithFlagGroups) + if err != nil { + c.PrintErrln(err) + } + return nil + }) + cmd.SetUsageTemplate(usageTemplate) + return cmdWithFlagGroups +} + +func (c *CommandWithGroupFlag) AddFlagGroup(name string) *FlagGroup { + fg := &FlagGroup{name: name, flagSet: pflag.NewFlagSet(name, pflag.ContinueOnError)} + c.flagGroups = append(c.flagGroups, fg) + return fg +} + +type FlagGroup struct { + name string + description string + flagSet *pflag.FlagSet +} + +func (c *FlagGroup) Name() string { + return c.name +} + +func (c *FlagGroup) Description() string { + return c.description +} + +func (c *FlagGroup) SetDescription(description string) { + c.description = description +} + +func (c *FlagGroup) FlagSet() *pflag.FlagSet { + return c.flagSet +} + +var templateFuncs = template.FuncMap{ + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "trimTrailingWhitespaces": trimRightSpace, +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// tmpl executes the given template text on data, writing the result to w. 
+func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} diff --git a/libs/cmdgroup/command_test.go b/libs/cmdgroup/command_test.go new file mode 100644 index 000000000..2eae31d14 --- /dev/null +++ b/libs/cmdgroup/command_test.go @@ -0,0 +1,51 @@ +package cmdgroup + +import ( + "bytes" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" +) + +func TestCommandFlagGrouping(t *testing.T) { + cmd := &cobra.Command{ + Use: "test [flags]", + Short: "test command", + Run: func(cmd *cobra.Command, args []string) { + // Do nothing + }, + } + + wrappedCmd := NewCommandWithGroupFlag(cmd) + jobGroup := wrappedCmd.AddFlagGroup("Job") + fs := jobGroup.FlagSet() + fs.String("job-name", "", "Name of the job") + fs.String("job-type", "", "Type of the job") + + pipelineGroup := wrappedCmd.AddFlagGroup("Pipeline") + fs = pipelineGroup.FlagSet() + fs.String("pipeline-name", "", "Name of the pipeline") + fs.String("pipeline-type", "", "Type of the pipeline") + + cmd.Flags().BoolP("bool", "b", false, "Bool flag") + + buf := bytes.NewBuffer(nil) + cmd.SetOutput(buf) + cmd.Usage() + + expected := `Usage: + test [flags] + +Job Flags: + --job-name string Name of the job + --job-type string Type of the job + +Pipeline Flags: + --pipeline-name string Name of the pipeline + --pipeline-type string Type of the pipeline + +Flags: + -b, --bool Bool flag` + require.Equal(t, expected, buf.String()) +} diff --git a/libs/cmdgroup/template.go b/libs/cmdgroup/template.go new file mode 100644 index 000000000..aac967b0e --- /dev/null +++ b/libs/cmdgroup/template.go @@ -0,0 +1,14 @@ +package cmdgroup + +const usageTemplate = `Usage:{{if .Command.Runnable}} + {{.Command.UseLine}}{{end}} +{{range .FlagGroups}} +{{.Name}} Flags:{{if not (eq .Description "")}} + {{.Description}}{{end}} +{{.FlagSet.FlagUsages | trimTrailingWhitespaces}} +{{end}} +{{if .Command.HasAvailableLocalFlags}}Flags: +{{.Command.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .Command.HasAvailableInheritedFlags}} + +Global Flags: +{{.Command.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}` From f54e790a3b64ca327a225886be60b8b2bd105059 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 6 Feb 2024 16:01:49 +0100 Subject: [PATCH 005/286] Ensure every variable reference is passed to lookup function (#1176) ## Changes References to keys that themselves are also variable references were shortcircuited in the previous approach. This meant that certain fields were resolved even if the lookup function would have instructed to skip resolution. To fix this we separate the memoization of resolved variable references from the memoization of lookups. Now, every variable reference is passed through the lookup function. 
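A minimal sketch of the behavior difference, modeled on the new `TestResolveWithSkipEverything` unit test (the wrapper test name and import paths below are illustrative):

```go
package dynvar_test

import (
	"testing"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/dynvar"
)

// "c" is itself a variable reference. The lookup function is now consulted
// for "c" as well, so a lookup that returns ErrSkipResolution leaves "d"
// untouched as "${c} ${c}" instead of partially resolving it to "${a} ${a}".
func TestResolveChainedReferenceSketch(t *testing.T) {
	in := dyn.V(map[string]dyn.Value{
		"a": dyn.V("a"),
		"c": dyn.V("${a}"),
		"d": dyn.V("${c} ${c}"),
	})
	out, err := dynvar.Resolve(in, func(p dyn.Path) (dyn.Value, error) {
		return dyn.InvalidValue, dynvar.ErrSkipResolution
	})
	if err != nil {
		t.Fatal(err)
	}
	if s, ok := out.Get("d").AsString(); !ok || s != "${c} ${c}" {
		t.Fatalf("expected d to be left untouched, got %q", s)
	}
}
```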
## Tests Before this change, the new test failed with: ``` === RUN TestResolveWithSkipEverything [...]/libs/dyn/dynvar/resolve_test.go:208: Error Trace: [...]/libs/dyn/dynvar/resolve_test.go:208 Error: Not equal: expected: "${d} ${c} ${c} ${d}" actual : "${b} ${a} ${a} ${b}" Diff: --- Expected +++ Actual @@ -1 +1 @@ -${d} ${c} ${c} ${d} +${b} ${a} ${a} ${b} Test: TestResolveWithSkipEverything ``` --- libs/dyn/dynvar/resolve.go | 72 +++++++++++++++++++++++---------- libs/dyn/dynvar/resolve_test.go | 25 ++++++++++++ 2 files changed, 75 insertions(+), 22 deletions(-) diff --git a/libs/dyn/dynvar/resolve.go b/libs/dyn/dynvar/resolve.go index b4e119b6d..b5417cac2 100644 --- a/libs/dyn/dynvar/resolve.go +++ b/libs/dyn/dynvar/resolve.go @@ -38,12 +38,20 @@ func Resolve(in dyn.Value, fn Lookup) (out dyn.Value, err error) { return resolver{in: in, fn: fn}.run() } +type lookupResult struct { + v dyn.Value + err error +} + type resolver struct { in dyn.Value fn Lookup refs map[string]ref resolved map[string]dyn.Value + + // Memoization for lookups. + lookups map[string]lookupResult } func (r resolver) run() (out dyn.Value, err error) { @@ -84,8 +92,10 @@ func (r *resolver) collectVariableReferences() (err error) { } func (r *resolver) resolveVariableReferences() (err error) { - // Initialize map for resolved variables. - // We use this for memoization. + // Initialize cache for lookups. + r.lookups = make(map[string]lookupResult) + + // Initialize cache for resolved variable references. r.resolved = make(map[string]dyn.Value) // Resolve each variable reference (in order). @@ -95,7 +105,7 @@ func (r *resolver) resolveVariableReferences() (err error) { keys := maps.Keys(r.refs) sort.Strings(keys) for _, key := range keys { - _, err := r.resolve(key, []string{key}) + _, err := r.resolveRef(key, r.refs[key], []string{key}) if err != nil { return err } @@ -104,29 +114,12 @@ func (r *resolver) resolveVariableReferences() (err error) { return nil } -func (r *resolver) resolve(key string, seen []string) (dyn.Value, error) { +func (r *resolver) resolveRef(key string, ref ref, seen []string) (dyn.Value, error) { // Check if we have already resolved this variable reference. if v, ok := r.resolved[key]; ok { return v, nil } - ref, ok := r.refs[key] - if !ok { - // Perform lookup in the input. - p, err := dyn.NewPathFromString(key) - if err != nil { - return dyn.InvalidValue, err - } - v, err := r.fn(p) - if err != nil && dyn.IsNoSuchKeyError(err) { - return dyn.InvalidValue, fmt.Errorf( - "reference does not exist: ${%s}", - key, - ) - } - return v, err - } - // This is an unresolved variable reference. deps := ref.references() @@ -143,7 +136,7 @@ func (r *resolver) resolve(key string, seen []string) (dyn.Value, error) { ) } - v, err := r.resolve(dep, append(seen, dep)) + v, err := r.resolveKey(dep, append(seen, dep)) // If we should skip resolution of this key, index j will hold an invalid [dyn.Value]. if errors.Is(err, ErrSkipResolution) { @@ -191,6 +184,41 @@ func (r *resolver) resolve(key string, seen []string) (dyn.Value, error) { return v, nil } +func (r *resolver) resolveKey(key string, seen []string) (dyn.Value, error) { + // Check if we have already looked up this key. + if v, ok := r.lookups[key]; ok { + return v.v, v.err + } + + // Parse the key into a path. + p, err := dyn.NewPathFromString(key) + if err != nil { + return dyn.InvalidValue, err + } + + // Look up the value for the given key. 
+ v, err := r.fn(p) + if err != nil { + if dyn.IsNoSuchKeyError(err) { + err = fmt.Errorf("reference does not exist: ${%s}", key) + } + + // Cache the return value and return to the caller. + r.lookups[key] = lookupResult{v: dyn.InvalidValue, err: err} + return dyn.InvalidValue, err + } + + // If the returned value is a valid variable reference, resolve it. + ref, ok := newRef(v) + if ok { + v, err = r.resolveRef(key, ref, seen) + } + + // Cache the return value and return to the caller. + r.lookups[key] = lookupResult{v: v, err: err} + return v, err +} + func (r *resolver) replaceVariableReferences() (dyn.Value, error) { // Walk the input and replace all variable references. return dyn.Walk(r.in, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { diff --git a/libs/dyn/dynvar/resolve_test.go b/libs/dyn/dynvar/resolve_test.go index ba700503e..1234b7cbf 100644 --- a/libs/dyn/dynvar/resolve_test.go +++ b/libs/dyn/dynvar/resolve_test.go @@ -182,3 +182,28 @@ func TestResolveWithSkip(t *testing.T) { assert.Equal(t, "a ${b}", getByPath(t, out, "e").MustString()) assert.Equal(t, "${b} a a ${b}", getByPath(t, out, "f").MustString()) } + +func TestResolveWithSkipEverything(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "a": dyn.V("a"), + "b": dyn.V("b"), + "c": dyn.V("${a}"), + "d": dyn.V("${b}"), + "e": dyn.V("${a} ${b}"), + "f": dyn.V("${b} ${a} ${a} ${b}"), + "g": dyn.V("${d} ${c} ${c} ${d}"), + }) + + // The call must not replace anything if the lookup function returns ErrSkipResolution. + out, err := dynvar.Resolve(in, func(path dyn.Path) (dyn.Value, error) { + return dyn.InvalidValue, dynvar.ErrSkipResolution + }) + require.NoError(t, err) + assert.Equal(t, "a", getByPath(t, out, "a").MustString()) + assert.Equal(t, "b", getByPath(t, out, "b").MustString()) + assert.Equal(t, "${a}", getByPath(t, out, "c").MustString()) + assert.Equal(t, "${b}", getByPath(t, out, "d").MustString()) + assert.Equal(t, "${a} ${b}", getByPath(t, out, "e").MustString()) + assert.Equal(t, "${b} ${a} ${a} ${b}", getByPath(t, out, "f").MustString()) + assert.Equal(t, "${d} ${c} ${c} ${d}", getByPath(t, out, "g").MustString()) +} From 6e075e8cf85a33ef4e7ce796ee354e6903870975 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 7 Feb 2024 10:22:44 +0100 Subject: [PATCH 006/286] Revert "Filter current user from resource permissions (#1145)" (#1179) ## Changes This reverts commit 4131069a4b8163cbfed1cd641a9cbbc33a7d81ac. The integration test for metadata computation failed. The back and forth to `dyn.Value` erases unexported fields that the code currently still depends on. We'll have to retry on top of #1098. --- bundle/permissions/filter.go | 80 --------------- bundle/permissions/filter_test.go | 157 ------------------------------ bundle/phases/initialize.go | 1 - 3 files changed, 238 deletions(-) delete mode 100644 bundle/permissions/filter.go delete mode 100644 bundle/permissions/filter_test.go diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go deleted file mode 100644 index 2916f5fb0..000000000 --- a/bundle/permissions/filter.go +++ /dev/null @@ -1,80 +0,0 @@ -package permissions - -import ( - "context" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/libs/dyn" - dync "github.com/databricks/cli/libs/dyn/convert" -) - -type filterCurrentUser struct{} - -// The databricks terraform provider does not allow changing the permissions of -// current user. The current user is implied to be the owner of all deployed resources. 
-// This mutator removes the current user from the permissions of all resources. -func FilterCurrentUser() bundle.Mutator { - return &filterCurrentUser{} -} - -func (m *filterCurrentUser) Name() string { - return "FilterCurrentUserFromPermissions" -} - -func filter(currentUser string) dyn.WalkValueFunc { - return func(p dyn.Path, v dyn.Value) (dyn.Value, error) { - // Permissions are defined at top level of a resource. We can skip walking - // after a depth of 4. - // [resource_type].[resource_name].[permissions].[array_index] - // Example: pipelines.foo.permissions.0 - if len(p) > 4 { - return v, dyn.ErrSkip - } - - // We can skip walking at a depth of 3 if the key is not "permissions". - // Example: pipelines.foo.libraries - if len(p) == 3 && p[2] != dyn.Key("permissions") { - return v, dyn.ErrSkip - } - - // We want to be at the level of an individual permission to check it's - // user_name and service_principal_name fields. - if len(p) != 4 || p[2] != dyn.Key("permissions") { - return v, nil - } - - // Filter if the user_name matches the current user - userName, ok := v.Get("user_name").AsString() - if ok && userName == currentUser { - return v, dyn.ErrDrop - } - - // Filter if the service_principal_name matches the current user - servicePrincipalName, ok := v.Get("service_principal_name").AsString() - if ok && servicePrincipalName == currentUser { - return v, dyn.ErrDrop - } - - return v, nil - } -} - -func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error { - rv, err := dync.FromTyped(b.Config.Resources, dyn.NilValue) - if err != nil { - return err - } - - currentUser := b.Config.Workspace.CurrentUser.UserName - nv, err := dyn.Walk(rv, filter(currentUser)) - if err != nil { - return err - } - - err = dync.ToTyped(&b.Config.Resources, nv) - if err != nil { - return err - } - - return nil -} diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go deleted file mode 100644 index fa0125696..000000000 --- a/bundle/permissions/filter_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package permissions - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/resources" - "github.com/databricks/databricks-sdk-go/service/iam" - "github.com/stretchr/testify/assert" -) - -var alice = resources.Permission{ - Level: CAN_MANAGE, - UserName: "alice@databricks.com", -} - -var bob = resources.Permission{ - Level: CAN_VIEW, - UserName: "bob@databricks.com", -} - -var robot = resources.Permission{ - Level: CAN_RUN, - ServicePrincipalName: "i-Robot", -} - -func testFixture(userName string) *bundle.Bundle { - p := []resources.Permission{ - alice, - bob, - robot, - } - - return &bundle.Bundle{ - Config: config.Root{ - Workspace: config.Workspace{ - CurrentUser: &config.User{ - User: &iam.User{ - UserName: userName, - }, - }, - }, - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "job1": { - Permissions: p, - }, - "job2": { - Permissions: p, - }, - }, - Pipelines: map[string]*resources.Pipeline{ - "pipeline1": { - Permissions: p, - }, - }, - Experiments: map[string]*resources.MlflowExperiment{ - "experiment1": { - Permissions: p, - }, - }, - Models: map[string]*resources.MlflowModel{ - "model1": { - Permissions: p, - }, - }, - ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ - "endpoint1": { - Permissions: p, - }, - }, - RegisteredModels: map[string]*resources.RegisteredModel{ - "registered_model1": { - Grants: 
[]resources.Grant{ - { - Principal: "abc", - }, - }, - }, - }, - }, - }, - } - -} - -func TestFilterCurrentUser(t *testing.T) { - b := testFixture("alice@databricks.com") - - err := bundle.Apply(context.Background(), b, FilterCurrentUser()) - assert.NoError(t, err) - - // Assert current user is filtered out. - assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) - assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, robot) - assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) - assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, robot) - assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) - assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, robot) - assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) - assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, robot) - assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) - assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, robot) - assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) - assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, robot) - assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) - - // Assert there's no change to the grant. - assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) -} - -func TestFilterCurrentServicePrincipal(t *testing.T) { - b := testFixture("i-Robot") - - err := bundle.Apply(context.Background(), b, FilterCurrentUser()) - assert.NoError(t, err) - - // Assert current user is filtered out. 
- assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) - assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, alice) - assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) - assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, alice) - assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) - assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, alice) - assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) - assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, alice) - assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) - assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, alice) - assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) - - assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) - assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, alice) - assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) - - // Assert there's no change to the grant. - assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) -} diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index bf20ff33a..e0558d937 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -39,7 +39,6 @@ func Initialize() bundle.Mutator { mutator.TranslatePaths(), python.WrapperWarning(), permissions.ApplyBundlePermissions(), - permissions.FilterCurrentUser(), metadata.AnnotateJobs(), terraform.Initialize(), scripts.Execute(config.ScriptPostInit), From dcb9c852010ce83b4751f7d0205d4a4fa4ef9b12 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 7 Feb 2024 10:25:07 +0100 Subject: [PATCH 007/286] Empty struct should yield empty map in `convert.FromTyped` (#1177) ## Changes This was an issue in cases where the typed structure contains a non-nil pointer to an empty struct. After conversion to a `dyn.Value` and back to the typed structure, the pointer became nil. ## Tests Unit tests. --- libs/dyn/convert/from_typed.go | 5 ----- libs/dyn/convert/from_typed_test.go | 19 +++++++++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index 75f1c7212..bd6b63670 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -84,11 +84,6 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { } } - // If the struct was equal to its zero value, emit a nil. 
- if len(out) == 0 { - return dyn.NilValue, nil - } - return dyn.NewValue(out, ref.Location()), nil } diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index d7fa60bb3..5fc2b90f6 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -19,6 +19,25 @@ func TestFromTypedStructZeroFields(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) +} + +func TestFromTypedStructPointerZeroFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + // For an initialized pointer we expect an empty map. + src := &Tmp{} + nv, err := FromTyped(src, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) + + // For a nil pointer we expect nil. + src = nil + nv, err = FromTyped(src, dyn.NilValue) + require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) } From 0b5fdcc34625e4aa2b499e982fd49ecb089af585 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 7 Feb 2024 10:25:53 +0100 Subject: [PATCH 008/286] Zero destination struct in `convert.ToTyped` (#1178) ## Changes Not doing this means that the output struct is not a true representation of the `dyn.Value` and unrepresentable state (e.g. unexported fields) can be carried over across `convert.ToTyped` calls. ## Tests Unit tests. --- libs/dyn/convert/to_typed.go | 4 ++++ libs/dyn/convert/to_typed_test.go | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index 209de12cb..715d3f670 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -53,6 +53,10 @@ func ToTyped(dst any, src dyn.Value) error { func toTypedStruct(dst reflect.Value, src dyn.Value) error { switch src.Kind() { case dyn.KindMap: + // Zero the destination struct such that fields + // that aren't present in [src] are cleared. + dst.SetZero() + info := getStructInfo(dst.Type()) for k, v := range src.MustMap() { index, ok := info.Fields[k] diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 3adc94c79..fd399b934 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -59,6 +59,27 @@ func TestToTypedStructOverwrite(t *testing.T) { assert.Equal(t, "baz", out.Bar) } +func TestToTypedStructClearFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar,omitempty"` + } + + // Struct value with non-empty fields. + var out = Tmp{ + Foo: "baz", + Bar: "qux", + } + + // Value is an empty map. + v := dyn.V(map[string]dyn.Value{}) + + // The previously set fields should be cleared. 
+ err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, Tmp{}, out) +} + func TestToTypedStructAnonymousByValue(t *testing.T) { type Bar struct { Bar string `json:"bar"` From de363faa5347c2ce98c0377d8be931ff96fa8908 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 7 Feb 2024 11:27:13 +0100 Subject: [PATCH 009/286] Make sure grouped flags are added to the command flag set (#1180) ## Changes Make sure grouped flags are added to the command flag set ## Tests Added regression tests --- bundle/run/options.go | 12 ++++++++---- libs/cmdgroup/command.go | 31 ++++++++++++++++++++++++++++--- libs/cmdgroup/command_test.go | 12 ++++++++++-- libs/cmdgroup/template.go | 4 ++-- 4 files changed, 48 insertions(+), 11 deletions(-) diff --git a/bundle/run/options.go b/bundle/run/options.go index 580612d0e..4e50788a9 100644 --- a/bundle/run/options.go +++ b/bundle/run/options.go @@ -12,15 +12,19 @@ type Options struct { } func (o *Options) Define(cmd *cobra.Command) { - wrappedCmd := cmdgroup.NewCommandWithGroupFlag(cmd) - jobGroup := wrappedCmd.AddFlagGroup("Job") + jobGroup := cmdgroup.NewFlagGroup("Job") o.Job.DefineJobOptions(jobGroup.FlagSet()) - jobTaskGroup := wrappedCmd.AddFlagGroup("Job Task") + jobTaskGroup := cmdgroup.NewFlagGroup("Job Task") jobTaskGroup.SetDescription(`Note: please prefer use of job-level parameters (--param) over task-level parameters. For more information, see https://docs.databricks.com/en/workflows/jobs/create-run-jobs.html#pass-parameters-to-a-databricks-job-task`) o.Job.DefineTaskOptions(jobTaskGroup.FlagSet()) - pipelineGroup := wrappedCmd.AddFlagGroup("Pipeline") + pipelineGroup := cmdgroup.NewFlagGroup("Pipeline") o.Pipeline.Define(pipelineGroup.FlagSet()) + + wrappedCmd := cmdgroup.NewCommandWithGroupFlag(cmd) + wrappedCmd.AddFlagGroup(jobGroup) + wrappedCmd.AddFlagGroup(jobTaskGroup) + wrappedCmd.AddFlagGroup(pipelineGroup) } diff --git a/libs/cmdgroup/command.go b/libs/cmdgroup/command.go index 19c9af16a..a2a776935 100644 --- a/libs/cmdgroup/command.go +++ b/libs/cmdgroup/command.go @@ -23,6 +23,24 @@ func (c *CommandWithGroupFlag) FlagGroups() []*FlagGroup { return c.flagGroups } +func (c *CommandWithGroupFlag) NonGroupedFlags() *pflag.FlagSet { + nonGrouped := pflag.NewFlagSet("non-grouped", pflag.ContinueOnError) + c.cmd.LocalFlags().VisitAll(func(f *pflag.Flag) { + for _, fg := range c.flagGroups { + if fg.Has(f) { + return + } + } + nonGrouped.AddFlag(f) + }) + + return nonGrouped +} + +func (c *CommandWithGroupFlag) HasNonGroupedFlags() bool { + return c.NonGroupedFlags().HasFlags() +} + func NewCommandWithGroupFlag(cmd *cobra.Command) *CommandWithGroupFlag { cmdWithFlagGroups := &CommandWithGroupFlag{cmd: cmd, flagGroups: make([]*FlagGroup, 0)} cmd.SetUsageFunc(func(c *cobra.Command) error { @@ -36,10 +54,9 @@ func NewCommandWithGroupFlag(cmd *cobra.Command) *CommandWithGroupFlag { return cmdWithFlagGroups } -func (c *CommandWithGroupFlag) AddFlagGroup(name string) *FlagGroup { - fg := &FlagGroup{name: name, flagSet: pflag.NewFlagSet(name, pflag.ContinueOnError)} +func (c *CommandWithGroupFlag) AddFlagGroup(fg *FlagGroup) { c.flagGroups = append(c.flagGroups, fg) - return fg + c.cmd.Flags().AddFlagSet(fg.FlagSet()) } type FlagGroup struct { @@ -48,6 +65,10 @@ type FlagGroup struct { flagSet *pflag.FlagSet } +func NewFlagGroup(name string) *FlagGroup { + return &FlagGroup{name: name, flagSet: pflag.NewFlagSet(name, pflag.ContinueOnError)} +} + func (c *FlagGroup) Name() string { return c.name } @@ -64,6 +85,10 @@ func (c *FlagGroup) 
FlagSet() *pflag.FlagSet { return c.flagSet } +func (c *FlagGroup) Has(f *pflag.Flag) bool { + return c.flagSet.Lookup(f.Name) != nil +} + var templateFuncs = template.FuncMap{ "trim": strings.TrimSpace, "trimRightSpace": trimRightSpace, diff --git a/libs/cmdgroup/command_test.go b/libs/cmdgroup/command_test.go index 2eae31d14..9122c7809 100644 --- a/libs/cmdgroup/command_test.go +++ b/libs/cmdgroup/command_test.go @@ -18,15 +18,17 @@ func TestCommandFlagGrouping(t *testing.T) { } wrappedCmd := NewCommandWithGroupFlag(cmd) - jobGroup := wrappedCmd.AddFlagGroup("Job") + jobGroup := NewFlagGroup("Job") fs := jobGroup.FlagSet() fs.String("job-name", "", "Name of the job") fs.String("job-type", "", "Type of the job") + wrappedCmd.AddFlagGroup(jobGroup) - pipelineGroup := wrappedCmd.AddFlagGroup("Pipeline") + pipelineGroup := NewFlagGroup("Pipeline") fs = pipelineGroup.FlagSet() fs.String("pipeline-name", "", "Name of the pipeline") fs.String("pipeline-type", "", "Type of the pipeline") + wrappedCmd.AddFlagGroup(pipelineGroup) cmd.Flags().BoolP("bool", "b", false, "Bool flag") @@ -48,4 +50,10 @@ Pipeline Flags: Flags: -b, --bool Bool flag` require.Equal(t, expected, buf.String()) + + require.NotNil(t, cmd.Flags().Lookup("job-name")) + require.NotNil(t, cmd.Flags().Lookup("job-type")) + require.NotNil(t, cmd.Flags().Lookup("pipeline-name")) + require.NotNil(t, cmd.Flags().Lookup("pipeline-type")) + require.NotNil(t, cmd.Flags().Lookup("bool")) } diff --git a/libs/cmdgroup/template.go b/libs/cmdgroup/template.go index aac967b0e..5c1be48fb 100644 --- a/libs/cmdgroup/template.go +++ b/libs/cmdgroup/template.go @@ -7,8 +7,8 @@ const usageTemplate = `Usage:{{if .Command.Runnable}} {{.Description}}{{end}} {{.FlagSet.FlagUsages | trimTrailingWhitespaces}} {{end}} -{{if .Command.HasAvailableLocalFlags}}Flags: -{{.Command.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .Command.HasAvailableInheritedFlags}} +{{if .HasNonGroupedFlags}}Flags: +{{.NonGroupedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .Command.HasAvailableInheritedFlags}} Global Flags: {{.Command.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}` From b64e11304c30cedd1910be7eb3f58f26cbbfc36d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 7 Feb 2024 11:53:50 +0100 Subject: [PATCH 010/286] Fix integration test with invalid configuration (#1182) ## Changes The indentation mistake on the `path` field under `notebook` meant the pipeline had a single entry with a `nil` notebook field. This was allowed but incorrect. While working on the `dyn.Value` approach, this yielded a non-nil but zeroed `notebook` field and a failure to translate an empty path. ## Tests Correcting the indentation made the test fail because the file is not a notebook. I changed it to a `file` reference and the test now passes. 
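For illustration, a small standalone Go sketch of why the misindented YAML yields a library entry with a `nil` notebook field. The `notebookRef`, `library`, and `spec` types below are simplified stand-ins for the real pipeline configuration types; the snippet is not part of this change.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Simplified stand-ins for the pipeline library configuration types.
type notebookRef struct {
	Path string `yaml:"path"`
}

type library struct {
	Notebook *notebookRef `yaml:"notebook"`
}

type spec struct {
	Libraries []library `yaml:"libraries"`
}

func main() {
	// Misindented: "path" is a sibling of "notebook" inside the list item,
	// so "notebook" carries no value and decodes to a nil pointer.
	bad := []byte("libraries:\n  - notebook:\n    path: ./foo.py\n")
	var s spec
	if err := yaml.Unmarshal(bad, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Libraries[0].Notebook) // <nil>

	// Correctly nested: "path" is a child of "notebook".
	good := []byte("libraries:\n  - notebook:\n      path: ./foo.py\n")
	var s2 spec
	if err := yaml.Unmarshal(good, &s2); err != nil {
		panic(err)
	}
	fmt.Println(s2.Libraries[0].Notebook.Path) // ./foo.py
}
```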
--- .../deploy_then_remove_resources/template/resources.yml.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl b/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl index b74344e4c..e3a676770 100644 --- a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl +++ b/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl @@ -3,5 +3,5 @@ resources: bar: name: test-bundle-pipeline-{{.unique_id}} libraries: - - notebook: - path: "./foo.py" + - file: + path: "./foo.py" From 6edab932337ce9d2bd76303b9375278e7933c222 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 7 Feb 2024 12:17:17 +0100 Subject: [PATCH 011/286] Added warning when trying to deploy bundle with `--fail-if-running` and running resources (#1163) ## Changes Deploying bundle when there are bundle resources running at the same time can be disruptive for jobs and pipelines in progress. With this change during deployment phase (before uploading any resources) if there is `--fail-if-running` specified DABs will check if there are any resources running and if so, will fail the deployment ## Tests Manual + add tests --- bundle/config/bundle.go | 6 +- bundle/config/deployment.go | 10 ++ bundle/deploy/check_running_resources.go | 143 ++++++++++++++++++ bundle/deploy/check_running_resources_test.go | 125 +++++++++++++++ bundle/deploy/lock/acquire.go | 4 +- bundle/deploy/lock/release.go | 2 +- bundle/phases/deploy.go | 4 +- cmd/bundle/deploy.go | 8 +- cmd/bundle/destroy.go | 2 +- 9 files changed, 295 insertions(+), 9 deletions(-) create mode 100644 bundle/config/deployment.go create mode 100644 bundle/deploy/check_running_resources.go create mode 100644 bundle/deploy/check_running_resources_test.go diff --git a/bundle/config/bundle.go b/bundle/config/bundle.go index 933e88bfa..21278151f 100644 --- a/bundle/config/bundle.go +++ b/bundle/config/bundle.go @@ -25,9 +25,6 @@ type Bundle struct { // For example, where to find the binary, which version to use, etc. Terraform *Terraform `json:"terraform,omitempty" bundle:"readonly"` - // Lock configures locking behavior on deployment. - Lock Lock `json:"lock" bundle:"readonly"` - // Force-override Git branch validation. Force bool `json:"force,omitempty" bundle:"readonly"` @@ -43,4 +40,7 @@ type Bundle struct { // Overrides the compute used for jobs and other supported assets. ComputeID string `json:"compute_id,omitempty"` + + // Deployment section specifies deployment related configuration for bundle + Deployment Deployment `json:"deployment"` } diff --git a/bundle/config/deployment.go b/bundle/config/deployment.go new file mode 100644 index 000000000..f89c7b3ee --- /dev/null +++ b/bundle/config/deployment.go @@ -0,0 +1,10 @@ +package config + +type Deployment struct { + // FailOnActiveRuns specifies whether to fail the deployment if there are + // running jobs or pipelines in the workspace. Defaults to false. + FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"` + + // Lock configures locking behavior on deployment. 
+ Lock Lock `json:"lock" bundle:"readonly"` +} diff --git a/bundle/deploy/check_running_resources.go b/bundle/deploy/check_running_resources.go new file mode 100644 index 000000000..deb7775c6 --- /dev/null +++ b/bundle/deploy/check_running_resources.go @@ -0,0 +1,143 @@ +package deploy + +import ( + "context" + "fmt" + "strconv" + + "github.com/databricks/cli/bundle" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/hashicorp/terraform-exec/tfexec" + tfjson "github.com/hashicorp/terraform-json" + "golang.org/x/sync/errgroup" +) + +type ErrResourceIsRunning struct { + resourceType string + resourceId string +} + +func (e ErrResourceIsRunning) Error() string { + return fmt.Sprintf("%s %s is running", e.resourceType, e.resourceId) +} + +type checkRunningResources struct { +} + +func (l *checkRunningResources) Name() string { + return "check-running-resources" +} + +func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) error { + if !b.Config.Bundle.Deployment.FailOnActiveRuns { + return nil + } + + tf := b.Terraform + if tf == nil { + return fmt.Errorf("terraform not initialized") + } + + err := tf.Init(ctx, tfexec.Upgrade(true)) + if err != nil { + return fmt.Errorf("terraform init: %w", err) + } + + state, err := b.Terraform.Show(ctx) + if err != nil { + return err + } + + err = checkAnyResourceRunning(ctx, b.WorkspaceClient(), state) + if err != nil { + return fmt.Errorf("deployment aborted, err: %w", err) + } + + return nil +} + +func CheckRunningResource() *checkRunningResources { + return &checkRunningResources{} +} + +func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, state *tfjson.State) error { + if state.Values == nil || state.Values.RootModule == nil { + return nil + } + + errs, errCtx := errgroup.WithContext(ctx) + + for _, resource := range state.Values.RootModule.Resources { + // Limit to resources. 
+ if resource.Mode != tfjson.ManagedResourceMode { + continue + } + + value, ok := resource.AttributeValues["id"] + if !ok { + continue + } + id, ok := value.(string) + if !ok { + continue + } + + switch resource.Type { + case "databricks_job": + errs.Go(func() error { + isRunning, err := IsJobRunning(errCtx, w, id) + // If there's an error retrieving the job, we assume it's not running + if err != nil { + return err + } + if isRunning { + return &ErrResourceIsRunning{resourceType: "job", resourceId: id} + } + return nil + }) + case "databricks_pipeline": + errs.Go(func() error { + isRunning, err := IsPipelineRunning(errCtx, w, id) + // If there's an error retrieving the pipeline, we assume it's not running + if err != nil { + return nil + } + if isRunning { + return &ErrResourceIsRunning{resourceType: "pipeline", resourceId: id} + } + return nil + }) + } + } + + return errs.Wait() +} + +func IsJobRunning(ctx context.Context, w *databricks.WorkspaceClient, jobId string) (bool, error) { + id, err := strconv.Atoi(jobId) + if err != nil { + return false, err + } + + runs, err := w.Jobs.ListRunsAll(ctx, jobs.ListRunsRequest{JobId: int64(id), ActiveOnly: true}) + if err != nil { + return false, err + } + + return len(runs) > 0, nil +} + +func IsPipelineRunning(ctx context.Context, w *databricks.WorkspaceClient, pipelineId string) (bool, error) { + resp, err := w.Pipelines.Get(ctx, pipelines.GetPipelineRequest{PipelineId: pipelineId}) + if err != nil { + return false, err + } + switch resp.State { + case pipelines.PipelineStateIdle, pipelines.PipelineStateFailed, pipelines.PipelineStateDeleted: + return false, nil + default: + return true, nil + } +} diff --git a/bundle/deploy/check_running_resources_test.go b/bundle/deploy/check_running_resources_test.go new file mode 100644 index 000000000..7dc1fb865 --- /dev/null +++ b/bundle/deploy/check_running_resources_test.go @@ -0,0 +1,125 @@ +package deploy + +import ( + "context" + "errors" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" + tfjson "github.com/hashicorp/terraform-json" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestIsAnyResourceRunningWithEmptyState(t *testing.T) { + mock := mocks.NewMockWorkspaceClient(t) + state := &tfjson.State{} + err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, state) + require.NoError(t, err) +} + +func TestIsAnyResourceRunningWithJob(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + state := &tfjson.State{ + Values: &tfjson.StateValues{ + RootModule: &tfjson.StateModule{ + Resources: []*tfjson.StateResource{ + { + Type: "databricks_job", + AttributeValues: map[string]interface{}{ + "id": "123", + }, + Mode: tfjson.ManagedResourceMode, + }, + }, + }, + }, + } + + jobsApi := m.GetMockJobsAPI() + jobsApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + JobId: 123, + ActiveOnly: true, + }).Return([]jobs.BaseRun{ + {RunId: 1234}, + }, nil).Once() + + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + require.ErrorContains(t, err, "job 123 is running") + + jobsApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + JobId: 123, + ActiveOnly: true, + }).Return([]jobs.BaseRun{}, nil).Once() + + err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + require.NoError(t, err) +} + +func TestIsAnyResourceRunningWithPipeline(t 
*testing.T) { + m := mocks.NewMockWorkspaceClient(t) + state := &tfjson.State{ + Values: &tfjson.StateValues{ + RootModule: &tfjson.StateModule{ + Resources: []*tfjson.StateResource{ + { + Type: "databricks_pipeline", + AttributeValues: map[string]interface{}{ + "id": "123", + }, + Mode: tfjson.ManagedResourceMode, + }, + }, + }, + }, + } + + pipelineApi := m.GetMockPipelinesAPI() + pipelineApi.EXPECT().Get(mock.Anything, pipelines.GetPipelineRequest{ + PipelineId: "123", + }).Return(&pipelines.GetPipelineResponse{ + PipelineId: "123", + State: pipelines.PipelineStateRunning, + }, nil).Once() + + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + require.ErrorContains(t, err, "pipeline 123 is running") + + pipelineApi.EXPECT().Get(mock.Anything, pipelines.GetPipelineRequest{ + PipelineId: "123", + }).Return(&pipelines.GetPipelineResponse{ + PipelineId: "123", + State: pipelines.PipelineStateIdle, + }, nil).Once() + err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + require.NoError(t, err) +} + +func TestIsAnyResourceRunningWithAPIFailure(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + state := &tfjson.State{ + Values: &tfjson.StateValues{ + RootModule: &tfjson.StateModule{ + Resources: []*tfjson.StateResource{ + { + Type: "databricks_pipeline", + AttributeValues: map[string]interface{}{ + "id": "123", + }, + Mode: tfjson.ManagedResourceMode, + }, + }, + }, + }, + } + + pipelineApi := m.GetMockPipelinesAPI() + pipelineApi.EXPECT().Get(mock.Anything, pipelines.GetPipelineRequest{ + PipelineId: "123", + }).Return(nil, errors.New("API failure")).Once() + + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + require.NoError(t, err) +} diff --git a/bundle/deploy/lock/acquire.go b/bundle/deploy/lock/acquire.go index 1335f7800..69e6663fc 100644 --- a/bundle/deploy/lock/acquire.go +++ b/bundle/deploy/lock/acquire.go @@ -35,7 +35,7 @@ func (m *acquire) init(b *bundle.Bundle) error { func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error { // Return early if locking is disabled. - if !b.Config.Bundle.Lock.IsEnabled() { + if !b.Config.Bundle.Deployment.Lock.IsEnabled() { log.Infof(ctx, "Skipping; locking is disabled") return nil } @@ -45,7 +45,7 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - force := b.Config.Bundle.Lock.Force + force := b.Config.Bundle.Deployment.Lock.Force log.Infof(ctx, "Acquiring deployment lock (force: %v)", force) err = b.Locker.Lock(ctx, force) if err != nil { diff --git a/bundle/deploy/lock/release.go b/bundle/deploy/lock/release.go index 52d271943..68d4e0f93 100644 --- a/bundle/deploy/lock/release.go +++ b/bundle/deploy/lock/release.go @@ -30,7 +30,7 @@ func (m *release) Name() string { func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error { // Return early if locking is disabled. 
- if !b.Config.Bundle.Lock.IsEnabled() { + if !b.Config.Bundle.Deployment.Lock.IsEnabled() { log.Infof(ctx, "Skipping; locking is disabled") return nil } diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 20fe2e413..5c6575509 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle/artifacts" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/bundle/deploy/lock" "github.com/databricks/cli/bundle/deploy/metadata" @@ -22,6 +23,8 @@ func Deploy() bundle.Mutator { lock.Acquire(), bundle.Defer( bundle.Seq( + terraform.StatePull(), + deploy.CheckRunningResource(), mutator.ValidateGitDetails(), libraries.MatchWithArtifacts(), artifacts.CleanUp(), @@ -31,7 +34,6 @@ func Deploy() bundle.Mutator { permissions.ApplyWorkspaceRootPermissions(), terraform.Interpolate(), terraform.Write(), - terraform.StatePull(), bundle.Defer( terraform.Apply(), bundle.Seq( diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 8818bbbf4..a83c268bc 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -15,18 +15,24 @@ func newDeployCommand() *cobra.Command { var force bool var forceLock bool + var failOnActiveRuns bool var computeID string cmd.Flags().BoolVar(&force, "force", false, "Force-override Git branch validation.") cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") + cmd.Flags().BoolVar(&failOnActiveRuns, "fail-on-active-runs", false, "Fail if there are running jobs or pipelines in the deployment.") cmd.Flags().StringVarP(&computeID, "compute-id", "c", "", "Override compute in the deployment with the given compute ID.") cmd.RunE = func(cmd *cobra.Command, args []string) error { b := bundle.Get(cmd.Context()) b.Config.Bundle.Force = force - b.Config.Bundle.Lock.Force = forceLock + b.Config.Bundle.Deployment.Lock.Force = forceLock b.Config.Bundle.ComputeID = computeID + if cmd.Flag("fail-on-active-runs").Changed { + b.Config.Bundle.Deployment.FailOnActiveRuns = failOnActiveRuns + } + return bundle.Apply(cmd.Context(), b, bundle.Seq( phases.Initialize(), phases.Build(), diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index 22d998abe..dad199bf9 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -30,7 +30,7 @@ func newDestroyCommand() *cobra.Command { b := bundle.Get(ctx) // If `--force-lock` is specified, force acquisition of the deployment lock. - b.Config.Bundle.Lock.Force = forceDestroy + b.Config.Bundle.Deployment.Lock.Force = forceDestroy // If `--auto-approve`` is specified, we skip confirmation checks b.AutoApprove = autoApprove From f8b0f783eaaf2e54962e53c9f2adcb6d5bb4a170 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 7 Feb 2024 12:18:56 +0100 Subject: [PATCH 012/286] Use `acc.WorkspaceTest` helper from bundle integration tests (#1181) ## Changes This helper: * Constructs a context * Constructs a `*databricks.WorkspaceClient` * Ensures required environment variables are present to run an integration test * Enables debugging integration tests from VS Code Debugging integration tests (from VS Code) is made possible by a prelude in the helper that checks if the calling process is a debug binary, and if so, sources environment variables from `~/..databricks/debug-env.json` (if present). ## Tests Integration tests still pass. 
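A minimal usage sketch (the test name, workspace call, and log line below are illustrative and not taken from this change):

```go
package bundle

import (
	"testing"

	"github.com/databricks/cli/internal/acc"
	"github.com/stretchr/testify/require"
)

func TestAccExample(t *testing.T) {
	// Constructs a context and a *databricks.WorkspaceClient, and ensures the
	// environment variables required for an integration test are present.
	ctx, wt := acc.WorkspaceTest(t)
	w := wt.W

	// Any workspace call works from here; this one is just an example.
	me, err := w.CurrentUser.Me(ctx)
	require.NoError(t, err)
	t.Logf("running as %s", me.UserName)
}
```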
--------- Co-authored-by: Andrew Nester --- .../deploy_then_remove_resources_test.go | 23 ++++------ internal/bundle/empty_bundle_test.go | 9 ++-- internal/bundle/generate_job_test.go | 43 +++++++++---------- internal/bundle/generate_pipeline_test.go | 40 ++++++++--------- internal/bundle/helpers.go | 18 ++++---- internal/bundle/job_metadata_test.go | 18 ++++---- internal/bundle/local_state_staleness_test.go | 24 +++++------ internal/bundle/python_wheel_test.go | 25 ++++------- 8 files changed, 91 insertions(+), 109 deletions(-) diff --git a/internal/bundle/deploy_then_remove_resources_test.go b/internal/bundle/deploy_then_remove_resources_test.go index 73860593c..72baf798c 100644 --- a/internal/bundle/deploy_then_remove_resources_test.go +++ b/internal/bundle/deploy_then_remove_resources_test.go @@ -1,38 +1,33 @@ package bundle import ( - "context" "os" "path/filepath" "testing" - "github.com/databricks/cli/internal" - "github.com/databricks/databricks-sdk-go" + "github.com/databricks/cli/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestAccBundleDeployThenRemoveResources(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, "deploy_then_remove_resources", map[string]any{ + bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ "unique_id": uniqueId, }) require.NoError(t, err) // deploy pipeline - err = deployBundle(t, bundleRoot) - require.NoError(t, err) - - w, err := databricks.NewWorkspaceClient() + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) // assert pipeline is created pipelineName := "test-bundle-pipeline-" + uniqueId - pipeline, err := w.Pipelines.GetByName(context.Background(), pipelineName) + pipeline, err := w.Pipelines.GetByName(ctx, pipelineName) require.NoError(t, err) assert.Equal(t, pipeline.Name, pipelineName) @@ -41,15 +36,15 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { require.NoError(t, err) // deploy again - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) // assert pipeline is deleted - _, err = w.Pipelines.GetByName(context.Background(), pipelineName) + _, err = w.Pipelines.GetByName(ctx, pipelineName) assert.ErrorContains(t, err, "does not exist") t.Cleanup(func() { - err = destroyBundle(t, bundleRoot) + err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) }) } diff --git a/internal/bundle/empty_bundle_test.go b/internal/bundle/empty_bundle_test.go index 9b39368f4..36883ae00 100644 --- a/internal/bundle/empty_bundle_test.go +++ b/internal/bundle/empty_bundle_test.go @@ -6,14 +6,13 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/require" ) func TestAccEmptyBundleDeploy(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, _ := acc.WorkspaceTest(t) // create empty bundle tmpDir := t.TempDir() @@ -27,11 +26,11 @@ func TestAccEmptyBundleDeploy(t *testing.T) { f.Close() // deploy empty bundle - err = deployBundle(t, tmpDir) + err = deployBundle(t, ctx, tmpDir) require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, tmpDir) + err = destroyBundle(t, ctx, tmpDir) require.NoError(t, err) }) } diff --git a/internal/bundle/generate_job_test.go 
b/internal/bundle/generate_job_test.go index e9445abc5..e6f157809 100644 --- a/internal/bundle/generate_job_test.go +++ b/internal/bundle/generate_job_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" @@ -20,23 +21,22 @@ import ( ) func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + gt := &generateJobTest{T: t, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, "with_includes", map[string]any{ + bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) require.NoError(t, err) - jobId := createTestJob(t) + jobId := gt.createTestJob(ctx) t.Cleanup(func() { - destroyJob(t, jobId) - require.NoError(t, err) + gt.destroyJob(ctx, jobId) }) t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunner(t, "bundle", "generate", "job", + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) @@ -61,15 +61,22 @@ func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12") require.Contains(t, generatedYaml, "num_workers: 1") - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) - err = destroyBundle(t, bundleRoot) + err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) - } -func createTestJob(t *testing.T) int64 { +type generateJobTest struct { + T *testing.T + w *databricks.WorkspaceClient +} + +func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { + t := gt.T + w := gt.w + var nodeTypeId string switch testutil.GetCloud(t) { case testutil.AWS: @@ -80,10 +87,6 @@ func createTestJob(t *testing.T) int64 { nodeTypeId = "n1-standard-4" } - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() tmpdir := internal.TemporaryWorkspaceDir(t, w) f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -112,13 +115,9 @@ func createTestJob(t *testing.T) int64 { return resp.JobId } -func destroyJob(t *testing.T, jobId int64) { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() - err = w.Jobs.Delete(ctx, jobs.DeleteJob{ +func (gt *generateJobTest) destroyJob(ctx context.Context, jobId int64) { + err := gt.w.Jobs.Delete(ctx, jobs.DeleteJob{ JobId: jobId, }) - require.NoError(t, err) + require.NoError(gt.T, err) } diff --git a/internal/bundle/generate_pipeline_test.go b/internal/bundle/generate_pipeline_test.go index 7b2323e6a..0005e29fa 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/internal/bundle/generate_pipeline_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -18,23 +19,22 @@ import ( ) func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + gt := 
&generatePipelineTest{T: t, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, "with_includes", map[string]any{ + bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) require.NoError(t, err) - pipelineId := createTestPipeline(t) + pipelineId := gt.createTestPipeline(ctx) t.Cleanup(func() { - destroyPipeline(t, pipelineId) - require.NoError(t, err) + gt.destroyPipeline(ctx, pipelineId) }) t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunner(t, "bundle", "generate", "pipeline", + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "pipeline", "--existing-pipeline-id", fmt.Sprint(pipelineId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) @@ -61,18 +61,22 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { require.Contains(t, generatedYaml, "- file:") require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py"))) - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) - err = destroyBundle(t, bundleRoot) + err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) } -func createTestPipeline(t *testing.T) string { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +type generatePipelineTest struct { + T *testing.T + w *databricks.WorkspaceClient +} + +func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) string { + t := gt.T + w := gt.w - ctx := context.Background() tmpdir := internal.TemporaryWorkspaceDir(t, w) f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -103,13 +107,9 @@ func createTestPipeline(t *testing.T) string { return resp.PipelineId } -func destroyPipeline(t *testing.T, pipelineId string) { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() - err = w.Pipelines.Delete(ctx, pipelines.DeletePipelineRequest{ +func (gt *generatePipelineTest) destroyPipeline(ctx context.Context, pipelineId string) { + err := gt.w.Pipelines.Delete(ctx, pipelines.DeletePipelineRequest{ PipelineId: pipelineId, }) - require.NoError(t, err) + require.NoError(gt.T, err) } diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 681edc2d6..2c2b2dac9 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -15,7 +15,7 @@ import ( "github.com/databricks/cli/libs/template" ) -func initTestTemplate(t *testing.T, templateName string, config map[string]any) (string, error) { +func initTestTemplate(t *testing.T, ctx context.Context, templateName string, config map[string]any) (string, error) { templateRoot := filepath.Join("bundles", templateName) bundleRoot := t.TempDir() @@ -24,7 +24,7 @@ func initTestTemplate(t *testing.T, templateName string, config map[string]any) return "", err } - ctx := root.SetWorkspaceClient(context.Background(), nil) + ctx = root.SetWorkspaceClient(ctx, nil) cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "bundles") ctx = cmdio.InContext(ctx, cmd) @@ -46,15 +46,14 @@ func writeConfigFile(t *testing.T, config map[string]any) (string, error) { return filepath, err } -func deployBundle(t *testing.T, path string) error { +func deployBundle(t *testing.T, ctx context.Context, path string) error { t.Setenv("BUNDLE_ROOT", path) - c := internal.NewCobraTestRunner(t, "bundle", "deploy", "--force-lock") + c := 
internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") _, _, err := c.Run() return err } -func runResource(t *testing.T, path string, key string) (string, error) { - ctx := context.Background() +func runResource(t *testing.T, ctx context.Context, path string, key string) (string, error) { ctx = cmdio.NewContext(ctx, cmdio.Default()) c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "run", key) @@ -62,8 +61,7 @@ func runResource(t *testing.T, path string, key string) (string, error) { return stdout.String(), err } -func runResourceWithParams(t *testing.T, path string, key string, params ...string) (string, error) { - ctx := context.Background() +func runResourceWithParams(t *testing.T, ctx context.Context, path string, key string, params ...string) (string, error) { ctx = cmdio.NewContext(ctx, cmdio.Default()) args := make([]string, 0) @@ -74,9 +72,9 @@ func runResourceWithParams(t *testing.T, path string, key string, params ...stri return stdout.String(), err } -func destroyBundle(t *testing.T, path string) error { +func destroyBundle(t *testing.T, ctx context.Context, path string) error { t.Setenv("BUNDLE_ROOT", path) - c := internal.NewCobraTestRunner(t, "bundle", "destroy", "--auto-approve") + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "destroy", "--auto-approve") _, _, err := c.Run() return err } diff --git a/internal/bundle/job_metadata_test.go b/internal/bundle/job_metadata_test.go index 3e2bb7f03..0d8a431e4 100644 --- a/internal/bundle/job_metadata_test.go +++ b/internal/bundle/job_metadata_test.go @@ -12,23 +12,21 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestAccJobsMetadataFile(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, "job_metadata", map[string]any{ + bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": "13.2.x-snapshot-scala2.12", @@ -36,12 +34,12 @@ func TestAccJobsMetadataFile(t *testing.T) { require.NoError(t, err) // deploy bundle - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) // Cleanup the deployed bundle t.Cleanup(func() { - err = destroyBundle(t, bundleRoot) + err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) }) diff --git a/internal/bundle/local_state_staleness_test.go b/internal/bundle/local_state_staleness_test.go index 06cfe0e0d..872ac8a8e 100644 --- a/internal/bundle/local_state_staleness_test.go +++ b/internal/bundle/local_state_staleness_test.go @@ -5,7 +5,8 @@ import ( "testing" "github.com/databricks/cli/internal" - "github.com/databricks/databricks-sdk-go" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/jobs" 
"github.com/google/uuid" @@ -14,11 +15,8 @@ import ( ) func TestAccLocalStateStaleness(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W // The approach for this test is as follows: // 1) First deploy of bundle instance A @@ -27,10 +25,10 @@ func TestAccLocalStateStaleness(t *testing.T) { // Because of deploy (2), the locally cached state of bundle instance A should be stale. // Then for deploy (3), it must use the remote state over the stale local state. - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) uniqueId := uuid.New().String() initialize := func() string { - root, err := initTestTemplate(t, "basic", map[string]any{ + root, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, "spark_version": "13.2.x-snapshot-scala2.12", @@ -38,26 +36,28 @@ func TestAccLocalStateStaleness(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, root) + err = destroyBundle(t, ctx, root) require.NoError(t, err) }) return root } + var err error + bundleA := initialize() bundleB := initialize() // 1) Deploy bundle A - err = deployBundle(t, bundleA) + err = deployBundle(t, ctx, bundleA) require.NoError(t, err) // 2) Deploy bundle B - err = deployBundle(t, bundleB) + err = deployBundle(t, ctx, bundleB) require.NoError(t, err) // 3) Deploy bundle A again - err = deployBundle(t, bundleA) + err = deployBundle(t, ctx, bundleA) require.NoError(t, err) // Assert that there is only a single job in the workspace corresponding to this bundle. diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index c94ed93a3..fc14fd17b 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -4,24 +4,17 @@ import ( "testing" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bool) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, _ := acc.WorkspaceTest(t) - var nodeTypeId string - if env == "gcp" { - nodeTypeId = "n1-standard-4" - } else if env == "aws" { - nodeTypeId = "i3.xlarge" - } else { - nodeTypeId = "Standard_DS4_v2" - } - - bundleRoot, err := initTestTemplate(t, "python_wheel_task", map[string]any{ + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task", map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, @@ -29,20 +22,20 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bo }) require.NoError(t, err) - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, bundleRoot) + destroyBundle(t, ctx, bundleRoot) }) - out, err := runResource(t, bundleRoot, "some_other_job") + out, err := runResource(t, ctx, bundleRoot, "some_other_job") require.NoError(t, err) require.Contains(t, out, "Hello from my func") require.Contains(t, out, "Got arguments:") require.Contains(t, out, "['my_test_code', 'one', 'two']") - out, err = runResourceWithParams(t, bundleRoot, "some_other_job", 
"--python-params=param1,param2") + out, err = runResourceWithParams(t, ctx, bundleRoot, "some_other_job", "--python-params=param1,param2") require.NoError(t, err) require.Contains(t, out, "Hello from my func") require.Contains(t, out, "Got arguments:") From f6cdc75825aaec9df1752ed0826a61015c0162da Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 7 Feb 2024 16:05:03 +0100 Subject: [PATCH 013/286] Release v0.212.4 (#1183) Bundles: * Allow specifying executable in artifact section and skip bash from WSL ([#1169](https://github.com/databricks/cli/pull/1169)). * Added warning when trying to deploy bundle with `--fail-if-running` and running resources ([#1163](https://github.com/databricks/cli/pull/1163)). * Group bundle run flags by job and pipeline types ([#1174](https://github.com/databricks/cli/pull/1174)). * Make sure grouped flags are added to the command flag set ([#1180](https://github.com/databricks/cli/pull/1180)). * Add short_name helper function to bundle init templates ([#1167](https://github.com/databricks/cli/pull/1167)). Internal: * Fix dynamic representation of zero values in maps and slices ([#1154](https://github.com/databricks/cli/pull/1154)). * Refactor library to artifact matching to not use pointers ([#1172](https://github.com/databricks/cli/pull/1172)). * Harden `dyn.Value` equality check ([#1173](https://github.com/databricks/cli/pull/1173)). * Ensure every variable reference is passed to lookup function ([#1176](https://github.com/databricks/cli/pull/1176)). * Empty struct should yield empty map in `convert.FromTyped` ([#1177](https://github.com/databricks/cli/pull/1177)). * Zero destination struct in `convert.ToTyped` ([#1178](https://github.com/databricks/cli/pull/1178)). * Fix integration test with invalid configuration ([#1182](https://github.com/databricks/cli/pull/1182)). * Use `acc.WorkspaceTest` helper from bundle integration tests ([#1181](https://github.com/databricks/cli/pull/1181)). --- CHANGELOG.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3dc700a8f..037028c94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Version changelog +## 0.212.4 + +Bundles: + * Allow specifying executable in artifact section and skip bash from WSL ([#1169](https://github.com/databricks/cli/pull/1169)). + * Added warning when trying to deploy bundle with `--fail-if-running` and running resources ([#1163](https://github.com/databricks/cli/pull/1163)). + * Group bundle run flags by job and pipeline types ([#1174](https://github.com/databricks/cli/pull/1174)). + * Make sure grouped flags are added to the command flag set ([#1180](https://github.com/databricks/cli/pull/1180)). + * Add short_name helper function to bundle init templates ([#1167](https://github.com/databricks/cli/pull/1167)). + +Internal: + * Fix dynamic representation of zero values in maps and slices ([#1154](https://github.com/databricks/cli/pull/1154)). + * Refactor library to artifact matching to not use pointers ([#1172](https://github.com/databricks/cli/pull/1172)). + * Harden `dyn.Value` equality check ([#1173](https://github.com/databricks/cli/pull/1173)). + * Ensure every variable reference is passed to lookup function ([#1176](https://github.com/databricks/cli/pull/1176)). + * Empty struct should yield empty map in `convert.FromTyped` ([#1177](https://github.com/databricks/cli/pull/1177)). + * Zero destination struct in `convert.ToTyped` ([#1178](https://github.com/databricks/cli/pull/1178)). 
+ * Fix integration test with invalid configuration ([#1182](https://github.com/databricks/cli/pull/1182)). + * Use `acc.WorkspaceTest` helper from bundle integration tests ([#1181](https://github.com/databricks/cli/pull/1181)). + ## 0.212.3 CLI: From 8e58e04e8ff3609d140f29bf0700709cc60eab36 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 7 Feb 2024 17:33:18 +0100 Subject: [PATCH 014/286] Move folders package into libs (#1184) ## Changes This is the last top-level package that doesn't need to be top-level. --- bundle/bundle.go | 2 +- bundle/root.go | 2 +- cmd/labs/project/installed.go | 2 +- {folders => libs/folders}/folders.go | 0 {folders => libs/folders}/folders_test.go | 2 +- libs/git/repository.go | 2 +- 6 files changed, 5 insertions(+), 5 deletions(-) rename {folders => libs/folders}/folders.go (100%) rename {folders => libs/folders}/folders_test.go (94%) diff --git a/bundle/bundle.go b/bundle/bundle.go index 9e21cb561..a178ea090 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -16,7 +16,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/bundle/metadata" - "github.com/databricks/cli/folders" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" diff --git a/bundle/root.go b/bundle/root.go index 7518bf5fc..efc21e0ca 100644 --- a/bundle/root.go +++ b/bundle/root.go @@ -7,7 +7,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" - "github.com/databricks/cli/folders" + "github.com/databricks/cli/libs/folders" ) // getRootEnv returns the value of the bundle root environment variable diff --git a/cmd/labs/project/installed.go b/cmd/labs/project/installed.go index 9a98a780c..fb349531b 100644 --- a/cmd/labs/project/installed.go +++ b/cmd/labs/project/installed.go @@ -8,8 +8,8 @@ import ( "os" "path/filepath" - "github.com/databricks/cli/folders" "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/log" ) diff --git a/folders/folders.go b/libs/folders/folders.go similarity index 100% rename from folders/folders.go rename to libs/folders/folders.go diff --git a/folders/folders_test.go b/libs/folders/folders_test.go similarity index 94% rename from folders/folders_test.go rename to libs/folders/folders_test.go index 9aa387070..17afc4022 100644 --- a/folders/folders_test.go +++ b/libs/folders/folders_test.go @@ -13,7 +13,7 @@ func TestFindDirWithLeaf(t *testing.T) { wd, err := os.Getwd() require.NoError(t, err) - root := filepath.Join(wd, "..") + root := filepath.Join(wd, "..", "..") // Find from working directory should work. { diff --git a/libs/git/repository.go b/libs/git/repository.go index d1641118f..531fd74e4 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -7,7 +7,7 @@ import ( "path/filepath" "strings" - "github.com/databricks/cli/folders" + "github.com/databricks/cli/libs/folders" ) const gitIgnoreFileName = ".gitignore" From b1b5ad8acdc00ed963c74336d5a3fadd06b41f4d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 8 Feb 2024 12:10:52 +0100 Subject: [PATCH 015/286] Log time it takes for profile to load (#1186) ## Changes Aids debugging why `auth profiles` may take longer than expected. ## Tests Confirmed manually that timing information shows up in the log output. 
--- cmd/auth/profiles.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 51ae9b185..7fdcb8f29 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -6,9 +6,11 @@ import ( "net/http" "os" "sync" + "time" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" @@ -117,8 +119,10 @@ func newProfilesCommand() *cobra.Command { } wg.Add(1) go func() { - // load more information about profile - profile.Load(cmd.Context(), skipValidate) + ctx := cmd.Context() + t := time.Now() + profile.Load(ctx, skipValidate) + log.Debugf(ctx, "Profile %q took %s to load", profile.Name, time.Since(t)) wg.Done() }() profiles = append(profiles, profile) From f7d1a5862d2f83463786244482bfda1102294a8a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 8 Feb 2024 13:23:14 +0100 Subject: [PATCH 016/286] Use allowlist for Git-related fields to include in metadata (#1187) ## Changes When new fields are added they should not automatically propagate to the bundle metadata. ## Tests Test passes. --- bundle/deploy/metadata/compute.go | 10 ++++++++-- bundle/deploy/metadata/compute_test.go | 4 ++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go index 460a81c93..c612d33a3 100644 --- a/bundle/deploy/metadata/compute.go +++ b/bundle/deploy/metadata/compute.go @@ -6,6 +6,7 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" ) @@ -25,8 +26,13 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) error { Config: metadata.Config{}, } - // Set git details in metadata - b.Metadata.Config.Bundle.Git = b.Config.Bundle.Git + // Set Git details in metadata + b.Metadata.Config.Bundle.Git = config.Git{ + Branch: b.Config.Bundle.Git.Branch, + OriginURL: b.Config.Bundle.Git.OriginURL, + Commit: b.Config.Bundle.Git.Commit, + BundleRootPath: b.Config.Bundle.Git.BundleRootPath, + } // Set job config paths in metadata jobsMetadata := make(map[string]*metadata.Job) diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go index c3cb029d1..a1a97aab3 100644 --- a/bundle/deploy/metadata/compute_test.go +++ b/bundle/deploy/metadata/compute_test.go @@ -30,6 +30,7 @@ func TestComputeMetadataMutator(t *testing.T) { OriginURL: "www.host.com", Commit: "abcd", BundleRootPath: "a/b/c/d", + Inferred: true, }, }, Resources: config.Resources{ @@ -76,6 +77,9 @@ func TestComputeMetadataMutator(t *testing.T) { OriginURL: "www.host.com", Commit: "abcd", BundleRootPath: "a/b/c/d", + + // Test that this field doesn't carry over into the metadata. + Inferred: false, }, }, Resources: metadata.Resources{ From a835a3e564b5c5174936cb5a053dd5ad71a89698 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 8 Feb 2024 13:25:51 +0100 Subject: [PATCH 017/286] Ignore environment variables for `auth profiles` (#1189) ## Changes If environment variables related to unified authentication are set and a user runs `auth profiles`, the environment variables will interfere with the output. This change only takes profile data into account for the output. ## Tests Added a unit test. 
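The core of the change is to restrict the SDK config loaders to the config file when loading each profile. A standalone sketch of that pattern follows; the profile name and error handling are illustrative.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/config"
)

func main() {
	// With only the ConfigFile loader, resolution reads the config file
	// (by default ~/.databrickscfg) and does not consult environment
	// variables such as DATABRICKS_HOST.
	cfg := &config.Config{
		Loaders: []config.Loader{config.ConfigFile},
		Profile: "profile1", // illustrative profile name
	}
	if err := cfg.EnsureResolved(); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Host, cfg.AuthType)
}
```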
--- cmd/auth/profiles.go | 11 ++++++---- cmd/auth/profiles_test.go | 45 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 4 deletions(-) create mode 100644 cmd/auth/profiles_test.go diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 7fdcb8f29..7c4a7ab2f 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -31,8 +31,10 @@ func (c *profileMetadata) IsEmpty() bool { } func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { - // TODO: disable config loaders other than configfile - cfg := &config.Config{Profile: c.Name} + cfg := &config.Config{ + Loaders: []config.Loader{config.ConfigFile}, + Profile: c.Name, + } _ = cfg.EnsureResolved() if cfg.IsAws() { c.Cloud = "aws" @@ -49,6 +51,7 @@ func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { if err != nil { return } + c.Host = cfg.Host c.AuthType = cfg.AuthType return } @@ -59,6 +62,7 @@ func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { return } _, err = a.Workspaces.List(ctx) + c.Host = cfg.Host c.AuthType = cfg.AuthType if err != nil { return @@ -70,14 +74,13 @@ func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { return } _, err = w.CurrentUser.Me(ctx) + c.Host = cfg.Host c.AuthType = cfg.AuthType if err != nil { return } c.Valid = true } - // set host again, this time normalized - c.Host = cfg.Host } func newProfilesCommand() *cobra.Command { diff --git a/cmd/auth/profiles_test.go b/cmd/auth/profiles_test.go new file mode 100644 index 000000000..c1971705f --- /dev/null +++ b/cmd/auth/profiles_test.go @@ -0,0 +1,45 @@ +package auth + +import ( + "context" + "path/filepath" + "runtime" + "testing" + + "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/databricks-sdk-go/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProfiles(t *testing.T) { + ctx := context.Background() + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + + // Create a config file with a profile + err := databrickscfg.SaveToProfile(ctx, &config.Config{ + ConfigFile: configFile, + Profile: "profile1", + Host: "https://abc.cloud.databricks.com", + Token: "token1", + }) + require.NoError(t, err) + + // Let the environment think we're using another profile + t.Setenv("DATABRICKS_HOST", "https://def.cloud.databricks.com") + t.Setenv("HOME", dir) + if runtime.GOOS == "windows" { + t.Setenv("USERPROFILE", dir) + } + + // Load the profile + profile := &profileMetadata{Name: "profile1"} + profile.Load(ctx, true) + + // Check the profile + assert.Equal(t, "profile1", profile.Name) + assert.Equal(t, "https://abc.cloud.databricks.com", profile.Host) + assert.Equal(t, "aws", profile.Cloud) + assert.Equal(t, "pat", profile.AuthType) +} From d638262665776aeff098603f6d51e652f9f9c32a Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 8 Feb 2024 18:22:53 +0530 Subject: [PATCH 018/286] Add spinner when downloading templates for bundle init (#1188) ## Changes Templates can take a long time to download. This PR adds a spinner to give feedback to users. 
## Tests Manually https://github.com/databricks/cli/assets/88374338/b453982c-3233-40f4-8d6f-f31606ff0195 --- cmd/bundle/init.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index db8250d07..47d78f7dc 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -191,12 +191,19 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf if err != nil { return err } + + // start the spinner + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "Downloading the template\n" + // TODO: Add automated test that the downloaded git repo is cleaned up. // Clone the repository in the temporary directory err = git.Clone(ctx, templatePath, ref, repoDir) + close(promptSpinner) if err != nil { return err } + // Clean up downloaded repository once the template is materialized. defer os.RemoveAll(repoDir) return template.Materialize(ctx, configFile, filepath.Join(repoDir, templateDir), outputDir) From 4073e45d4bd7f1d963a71525aef8511001237ab8 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 8 Feb 2024 16:18:53 +0100 Subject: [PATCH 019/286] Use mockery to generate mocks compatible with testify/mock (#1190) ## Changes This is the same approach we use in the Go SDK. ## Tests Tests pass. --- .mockery.yaml | 10 + NOTICE | 8 +- bundle/deploy/terraform/state_pull_test.go | 13 +- bundle/deploy/terraform/state_push_test.go | 16 +- go.mod | 5 +- go.sum | 2 - internal/mocks/README.md | 5 +- internal/mocks/libs/filer/filer_mock.go | 139 -------- internal/mocks/libs/filer/mock_filer.go | 390 +++++++++++++++++++++ 9 files changed, 420 insertions(+), 168 deletions(-) create mode 100644 .mockery.yaml delete mode 100644 internal/mocks/libs/filer/filer_mock.go create mode 100644 internal/mocks/libs/filer/mock_filer.go diff --git a/.mockery.yaml b/.mockery.yaml new file mode 100644 index 000000000..bc9c051cd --- /dev/null +++ b/.mockery.yaml @@ -0,0 +1,10 @@ +with-expecter: true +filename: "mock_{{.InterfaceName | snakecase}}.go" +mockname: "Mock{{.InterfaceName}}" +outpkg: "mock{{.PackageName}}" +packages: + github.com/databricks/cli/libs/filer: + interfaces: + Filer: + config: + dir: "internal/mocks/libs/filer" diff --git a/NOTICE b/NOTICE index 7c7eb7db4..71ba7fbcc 100644 --- a/NOTICE +++ b/NOTICE @@ -16,16 +16,12 @@ go-ini/ini - https://github.com/go-ini/ini Copyright ini authors License - https://github.com/go-ini/ini/blob/main/LICENSE -uber-go/mock - https://go.uber.org/mock -Copyright Google Inc. -License - https://github.com/uber-go/mock/blob/main/LICENSE - —-- This software contains code from the following open source projects, licensed under the MPL 2.0 license: hashicopr/go-version - https://github.com/hashicorp/go-version -Copyright 2014 HashiCorp, Inc. +Copyright 2014 HashiCorp, Inc. 
License - https://github.com/hashicorp/go-version/blob/main/LICENSE hashicorp/hc-install - https://github.com/hashicorp/hc-install @@ -81,7 +77,7 @@ License - https://github.com/fatih/color/blob/main/LICENSE.md ghodss/yaml - https://github.com/ghodss/yaml Copyright (c) 2014 Sam Ghods License - https://github.com/ghodss/yaml/blob/master/LICENSE - + mattn/go-isatty - https://github.com/mattn/go-isatty Copyright (c) Yasuhiro MATSUMOTO https://github.com/mattn/go-isatty/blob/master/LICENSE diff --git a/bundle/deploy/terraform/state_pull_test.go b/bundle/deploy/terraform/state_pull_test.go index 60eb5d90c..b7734a10f 100644 --- a/bundle/deploy/terraform/state_pull_test.go +++ b/bundle/deploy/terraform/state_pull_test.go @@ -11,25 +11,24 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - mock "github.com/databricks/cli/internal/mocks/libs/filer" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" ) func mockStateFilerForPull(t *testing.T, contents map[string]int, merr error) filer.Filer { buf, err := json.Marshal(contents) require.NoError(t, err) - ctrl := gomock.NewController(t) - mock := mock.NewMockFiler(ctrl) - mock. + f := mockfiler.NewMockFiler(t) + f. EXPECT(). - Read(gomock.Any(), gomock.Eq(TerraformStateFileName)). + Read(mock.Anything, TerraformStateFileName). Return(io.NopCloser(bytes.NewReader(buf)), merr). Times(1) - return mock + return f } func statePullTestBundle(t *testing.T) *bundle.Bundle { diff --git a/bundle/deploy/terraform/state_push_test.go b/bundle/deploy/terraform/state_push_test.go index 4167b3cb9..bd4514a5f 100644 --- a/bundle/deploy/terraform/state_push_test.go +++ b/bundle/deploy/terraform/state_push_test.go @@ -8,25 +8,23 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - mock "github.com/databricks/cli/internal/mocks/libs/filer" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" - "go.uber.org/mock/gomock" + "github.com/stretchr/testify/mock" ) func mockStateFilerForPush(t *testing.T, fn func(body io.Reader)) filer.Filer { - ctrl := gomock.NewController(t) - mock := mock.NewMockFiler(ctrl) - mock. + f := mockfiler.NewMockFiler(t) + f. EXPECT(). - Write(gomock.Any(), gomock.Any(), gomock.Any(), filer.CreateParentDirectories, filer.OverwriteIfExists). - Do(func(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) error { + Write(mock.Anything, mock.Anything, mock.Anything, filer.CreateParentDirectories, filer.OverwriteIfExists). + Run(func(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) { fn(reader) - return nil }). Return(nil). 
Times(1) - return mock + return f } func statePushTestBundle(t *testing.T) *bundle.Bundle { diff --git a/go.mod b/go.mod index f33219aa4..0d42e78e5 100644 --- a/go.mod +++ b/go.mod @@ -30,10 +30,7 @@ require ( gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) -require ( - go.uber.org/mock v0.4.0 - gopkg.in/yaml.v3 v3.0.1 -) +require gopkg.in/yaml.v3 v3.0.1 require ( cloud.google.com/go/compute v1.23.3 // indirect diff --git a/go.sum b/go.sum index 96d043119..ed864e6d2 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,6 @@ go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ3 go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= diff --git a/internal/mocks/README.md b/internal/mocks/README.md index 231bbfaa4..70ce54546 100644 --- a/internal/mocks/README.md +++ b/internal/mocks/README.md @@ -4,4 +4,7 @@ Use this directory to store mocks for interfaces in this repository. Please use the same package structure for the mocks as the interface it is mocking. -See https://github.com/uber-go/mock for more information on how to generate mocks. +Refresh mocks by running: +``` +go run github.com/vektra/mockery/v2@b9df18e0f7b94f0bc11af3f379c8a9aea1e1e8da +``` diff --git a/internal/mocks/libs/filer/filer_mock.go b/internal/mocks/libs/filer/filer_mock.go deleted file mode 100644 index ef00976a2..000000000 --- a/internal/mocks/libs/filer/filer_mock.go +++ /dev/null @@ -1,139 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/databricks/cli/libs/filer (interfaces: Filer) -// -// Generated by this command: -// -// mockgen -destination filer_mock.go github.com/databricks/cli/libs/filer Filer -// -// Package mock_filer is a generated GoMock package. -package mock_filer - -import ( - context "context" - io "io" - fs "io/fs" - reflect "reflect" - - filer "github.com/databricks/cli/libs/filer" - gomock "go.uber.org/mock/gomock" -) - -// MockFiler is a mock of Filer interface. -type MockFiler struct { - ctrl *gomock.Controller - recorder *MockFilerMockRecorder -} - -// MockFilerMockRecorder is the mock recorder for MockFiler. -type MockFilerMockRecorder struct { - mock *MockFiler -} - -// NewMockFiler creates a new mock instance. -func NewMockFiler(ctrl *gomock.Controller) *MockFiler { - mock := &MockFiler{ctrl: ctrl} - mock.recorder = &MockFilerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockFiler) EXPECT() *MockFilerMockRecorder { - return m.recorder -} - -// Delete mocks base method. -func (m *MockFiler) Delete(arg0 context.Context, arg1 string, arg2 ...filer.DeleteMode) error { - m.ctrl.T.Helper() - varargs := []any{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Delete", varargs...) 
- ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockFilerMockRecorder) Delete(arg0, arg1 any, arg2 ...any) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]any{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockFiler)(nil).Delete), varargs...) -} - -// Mkdir mocks base method. -func (m *MockFiler) Mkdir(arg0 context.Context, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Mkdir", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Mkdir indicates an expected call of Mkdir. -func (mr *MockFilerMockRecorder) Mkdir(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mkdir", reflect.TypeOf((*MockFiler)(nil).Mkdir), arg0, arg1) -} - -// Read mocks base method. -func (m *MockFiler) Read(arg0 context.Context, arg1 string) (io.ReadCloser, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Read", arg0, arg1) - ret0, _ := ret[0].(io.ReadCloser) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Read indicates an expected call of Read. -func (mr *MockFilerMockRecorder) Read(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockFiler)(nil).Read), arg0, arg1) -} - -// ReadDir mocks base method. -func (m *MockFiler) ReadDir(arg0 context.Context, arg1 string) ([]fs.DirEntry, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadDir", arg0, arg1) - ret0, _ := ret[0].([]fs.DirEntry) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadDir indicates an expected call of ReadDir. -func (mr *MockFilerMockRecorder) ReadDir(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDir", reflect.TypeOf((*MockFiler)(nil).ReadDir), arg0, arg1) -} - -// Stat mocks base method. -func (m *MockFiler) Stat(arg0 context.Context, arg1 string) (fs.FileInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stat", arg0, arg1) - ret0, _ := ret[0].(fs.FileInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Stat indicates an expected call of Stat. -func (mr *MockFilerMockRecorder) Stat(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockFiler)(nil).Stat), arg0, arg1) -} - -// Write mocks base method. -func (m *MockFiler) Write(arg0 context.Context, arg1 string, arg2 io.Reader, arg3 ...filer.WriteMode) error { - m.ctrl.T.Helper() - varargs := []any{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Write", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// Write indicates an expected call of Write. -func (mr *MockFilerMockRecorder) Write(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]any{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockFiler)(nil).Write), varargs...) -} diff --git a/internal/mocks/libs/filer/mock_filer.go b/internal/mocks/libs/filer/mock_filer.go new file mode 100644 index 000000000..d0d58cbda --- /dev/null +++ b/internal/mocks/libs/filer/mock_filer.go @@ -0,0 +1,390 @@ +// Code generated by mockery v2.39.1. DO NOT EDIT. 
+ +package mockfiler + +import ( + context "context" + fs "io/fs" + + filer "github.com/databricks/cli/libs/filer" + + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// MockFiler is an autogenerated mock type for the Filer type +type MockFiler struct { + mock.Mock +} + +type MockFiler_Expecter struct { + mock *mock.Mock +} + +func (_m *MockFiler) EXPECT() *MockFiler_Expecter { + return &MockFiler_Expecter{mock: &_m.Mock} +} + +// Delete provides a mock function with given fields: ctx, path, mode +func (_m *MockFiler) Delete(ctx context.Context, path string, mode ...filer.DeleteMode) error { + _va := make([]interface{}, len(mode)) + for _i := range mode { + _va[_i] = mode[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, path) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...filer.DeleteMode) error); ok { + r0 = rf(ctx, path, mode...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFiler_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type MockFiler_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - path string +// - mode ...filer.DeleteMode +func (_e *MockFiler_Expecter) Delete(ctx interface{}, path interface{}, mode ...interface{}) *MockFiler_Delete_Call { + return &MockFiler_Delete_Call{Call: _e.mock.On("Delete", + append([]interface{}{ctx, path}, mode...)...)} +} + +func (_c *MockFiler_Delete_Call) Run(run func(ctx context.Context, path string, mode ...filer.DeleteMode)) *MockFiler_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]filer.DeleteMode, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(filer.DeleteMode) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockFiler_Delete_Call) Return(_a0 error) *MockFiler_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFiler_Delete_Call) RunAndReturn(run func(context.Context, string, ...filer.DeleteMode) error) *MockFiler_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Mkdir provides a mock function with given fields: ctx, path +func (_m *MockFiler) Mkdir(ctx context.Context, path string) error { + ret := _m.Called(ctx, path) + + if len(ret) == 0 { + panic("no return value specified for Mkdir") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, path) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFiler_Mkdir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Mkdir' +type MockFiler_Mkdir_Call struct { + *mock.Call +} + +// Mkdir is a helper method to define mock.On call +// - ctx context.Context +// - path string +func (_e *MockFiler_Expecter) Mkdir(ctx interface{}, path interface{}) *MockFiler_Mkdir_Call { + return &MockFiler_Mkdir_Call{Call: _e.mock.On("Mkdir", ctx, path)} +} + +func (_c *MockFiler_Mkdir_Call) Run(run func(ctx context.Context, path string)) *MockFiler_Mkdir_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockFiler_Mkdir_Call) Return(_a0 error) *MockFiler_Mkdir_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFiler_Mkdir_Call) RunAndReturn(run func(context.Context, string) error) *MockFiler_Mkdir_Call { + _c.Call.Return(run) + return _c +} + +// Read provides a mock function with given fields: ctx, path +func (_m *MockFiler) Read(ctx context.Context, path string) (io.ReadCloser, error) { + ret := _m.Called(ctx, path) + + if len(ret) == 0 { + panic("no return value specified for Read") + } + + var r0 io.ReadCloser + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (io.ReadCloser, error)); ok { + return rf(ctx, path) + } + if rf, ok := ret.Get(0).(func(context.Context, string) io.ReadCloser); ok { + r0 = rf(ctx, path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFiler_Read_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Read' +type MockFiler_Read_Call struct { + *mock.Call +} + +// Read is a helper method to define mock.On call +// - ctx context.Context +// - path string +func (_e *MockFiler_Expecter) Read(ctx interface{}, path interface{}) *MockFiler_Read_Call { + return &MockFiler_Read_Call{Call: _e.mock.On("Read", ctx, path)} +} + +func (_c *MockFiler_Read_Call) Run(run func(ctx context.Context, path string)) *MockFiler_Read_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockFiler_Read_Call) Return(_a0 io.ReadCloser, _a1 error) *MockFiler_Read_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFiler_Read_Call) RunAndReturn(run func(context.Context, string) (io.ReadCloser, error)) *MockFiler_Read_Call { + _c.Call.Return(run) + return _c +} + +// ReadDir provides a mock function with given fields: ctx, path +func (_m *MockFiler) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) { + ret := _m.Called(ctx, path) + + if len(ret) == 0 { + panic("no return value 
specified for ReadDir") + } + + var r0 []fs.DirEntry + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]fs.DirEntry, error)); ok { + return rf(ctx, path) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []fs.DirEntry); ok { + r0 = rf(ctx, path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]fs.DirEntry) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFiler_ReadDir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReadDir' +type MockFiler_ReadDir_Call struct { + *mock.Call +} + +// ReadDir is a helper method to define mock.On call +// - ctx context.Context +// - path string +func (_e *MockFiler_Expecter) ReadDir(ctx interface{}, path interface{}) *MockFiler_ReadDir_Call { + return &MockFiler_ReadDir_Call{Call: _e.mock.On("ReadDir", ctx, path)} +} + +func (_c *MockFiler_ReadDir_Call) Run(run func(ctx context.Context, path string)) *MockFiler_ReadDir_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockFiler_ReadDir_Call) Return(_a0 []fs.DirEntry, _a1 error) *MockFiler_ReadDir_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFiler_ReadDir_Call) RunAndReturn(run func(context.Context, string) ([]fs.DirEntry, error)) *MockFiler_ReadDir_Call { + _c.Call.Return(run) + return _c +} + +// Stat provides a mock function with given fields: ctx, name +func (_m *MockFiler) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + ret := _m.Called(ctx, name) + + if len(ret) == 0 { + panic("no return value specified for Stat") + } + + var r0 fs.FileInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (fs.FileInfo, error)); ok { + return rf(ctx, name) + } + if rf, ok := ret.Get(0).(func(context.Context, string) fs.FileInfo); ok { + r0 = rf(ctx, name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fs.FileInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFiler_Stat_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stat' +type MockFiler_Stat_Call struct { + *mock.Call +} + +// Stat is a helper method to define mock.On call +// - ctx context.Context +// - name string +func (_e *MockFiler_Expecter) Stat(ctx interface{}, name interface{}) *MockFiler_Stat_Call { + return &MockFiler_Stat_Call{Call: _e.mock.On("Stat", ctx, name)} +} + +func (_c *MockFiler_Stat_Call) Run(run func(ctx context.Context, name string)) *MockFiler_Stat_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockFiler_Stat_Call) Return(_a0 fs.FileInfo, _a1 error) *MockFiler_Stat_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFiler_Stat_Call) RunAndReturn(run func(context.Context, string) (fs.FileInfo, error)) *MockFiler_Stat_Call { + _c.Call.Return(run) + return _c +} + +// Write provides a mock function with given fields: ctx, path, reader, mode +func (_m *MockFiler) Write(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) error { + _va := make([]interface{}, len(mode)) + for _i := range mode { + _va[_i] = mode[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, path, reader) + _ca = append(_ca, _va...) 
+ ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Write") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, io.Reader, ...filer.WriteMode) error); ok { + r0 = rf(ctx, path, reader, mode...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFiler_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write' +type MockFiler_Write_Call struct { + *mock.Call +} + +// Write is a helper method to define mock.On call +// - ctx context.Context +// - path string +// - reader io.Reader +// - mode ...filer.WriteMode +func (_e *MockFiler_Expecter) Write(ctx interface{}, path interface{}, reader interface{}, mode ...interface{}) *MockFiler_Write_Call { + return &MockFiler_Write_Call{Call: _e.mock.On("Write", + append([]interface{}{ctx, path, reader}, mode...)...)} +} + +func (_c *MockFiler_Write_Call) Run(run func(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode)) *MockFiler_Write_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]filer.WriteMode, len(args)-3) + for i, a := range args[3:] { + if a != nil { + variadicArgs[i] = a.(filer.WriteMode) + } + } + run(args[0].(context.Context), args[1].(string), args[2].(io.Reader), variadicArgs...) + }) + return _c +} + +func (_c *MockFiler_Write_Call) Return(_a0 error) *MockFiler_Write_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFiler_Write_Call) RunAndReturn(run func(context.Context, string, io.Reader, ...filer.WriteMode) error) *MockFiler_Write_Call { + _c.Call.Return(run) + return _c +} + +// NewMockFiler creates a new instance of MockFiler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockFiler(t interface { + mock.TestingT + Cleanup(func()) +}) *MockFiler { + mock := &MockFiler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} From cac112c5bc27f49cb05e3b99b42be51a0d1fd829 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Thu, 8 Feb 2024 20:26:41 +0000 Subject: [PATCH 020/286] Update LICENSE (#1013) See https://www.databricks.com/legal/db-license --- LICENSE | 82 ++++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 63 insertions(+), 19 deletions(-) diff --git a/LICENSE b/LICENSE index 21db58bb9..dc30b4656 100644 --- a/LICENSE +++ b/LICENSE @@ -1,25 +1,69 @@ -DB license + Databricks License + Copyright (2022) Databricks, Inc. -Copyright (2022) Databricks, Inc. + Definitions. + + Agreement: The agreement between Databricks, Inc., and you governing + the use of the Databricks Services, as that term is defined in + the Master Cloud Services Agreement (MCSA) located at + www.databricks.com/legal/mcsa. + + Licensed Materials: The source code, object code, data, and/or other + works to which this license applies. -Definitions. + Scope of Use. You may not use the Licensed Materials except in + connection with your use of the Databricks Services pursuant to + the Agreement. Your use of the Licensed Materials must comply at all + times with any restrictions applicable to the Databricks Services, + generally, and must be used in accordance with any applicable + documentation. 
You may view, use, copy, modify, publish, and/or + distribute the Licensed Materials solely for the purposes of using + the Licensed Materials within or connecting to the Databricks Services. + If you do not agree to these terms, you may not view, use, copy, + modify, publish, and/or distribute the Licensed Materials. + + Redistribution. You may redistribute and sublicense the Licensed + Materials so long as all use is in compliance with these terms. + In addition: + + - You must give any other recipients a copy of this License; + - You must cause any modified files to carry prominent notices + stating that you changed the files; + - You must retain, in any derivative works that you distribute, + all copyright, patent, trademark, and attribution notices, + excluding those notices that do not pertain to any part of + the derivative works; and + - If a "NOTICE" text file is provided as part of its + distribution, then any derivative works that you distribute + must include a readable copy of the attribution notices + contained within such NOTICE file, excluding those notices + that do not pertain to any part of the derivative works. -Agreement: The agreement between Databricks, Inc., and you governing the use of the Databricks Services, which shall be, with respect to Databricks, the Databricks Terms of Service located at www.databricks.com/termsofservice, and with respect to Databricks Community Edition, the Community Edition Terms of Service located at www.databricks.com/ce-termsofuse, in each case unless you have entered into a separate written agreement with Databricks governing the use of the applicable Databricks Services. + You may add your own copyright statement to your modifications and may + provide additional license terms and conditions for use, reproduction, + or distribution of your modifications, or for any such derivative works + as a whole, provided your use, reproduction, and distribution of + the Licensed Materials otherwise complies with the conditions stated + in this License. -Software: The source code and object code to which this license applies. + Termination. This license terminates automatically upon your breach of + these terms or upon the termination of your Agreement. Additionally, + Databricks may terminate this license at any time on notice. Upon + termination, you must permanently delete the Licensed Materials and + all copies thereof. -Scope of Use. You may not use this Software except in connection with your use of the Databricks Services pursuant to the Agreement. Your use of the Software must comply at all times with any restrictions applicable to the Databricks Services, generally, and must be used in accordance with any applicable documentation. You may view, use, copy, modify, publish, and/or distribute the Software solely for the purposes of using the code within or connecting to the Databricks Services. If you do not agree to these terms, you may not view, use, copy, modify, publish, and/or distribute the Software. + DISCLAIMER; LIMITATION OF LIABILITY. -Redistribution. You may redistribute and sublicense the Software so long as all use is in compliance with these terms. 
In addition: - -You must give any other recipients a copy of this License; -You must cause any modified files to carry prominent notices stating that you changed the files; -You must retain, in the source code form of any derivative works that you distribute, all copyright, patent, trademark, and attribution notices from the source code form, excluding those notices that do not pertain to any part of the derivative works; and -If the source code form includes a "NOTICE" text file as part of its distribution, then any derivative works that you distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the derivative works. -You may add your own copyright statement to your modifications and may provide additional license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the Software otherwise complies with the conditions stated in this License. - -Termination. This license terminates automatically upon your breach of these terms or upon the termination of your Agreement. Additionally, Databricks may terminate this license at any time on notice. Upon termination, you must permanently delete the Software and all copies thereof. - -DISCLAIMER; LIMITATION OF LIABILITY. - -THE SOFTWARE IS PROVIDED “AS-IS” AND WITH ALL FAULTS. DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY DISCLAIMS ALL WARRANTIES RELATING TO THE SOURCE CODE, EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE SOURCE CODE SHALL BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + THE LICENSED MATERIALS ARE PROVIDED “AS-IS” AND WITH ALL FAULTS. + DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY + DISCLAIMS ALL WARRANTIES RELATING TO THE LICENSED MATERIALS, EXPRESS + AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, + CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR + FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND + ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF + YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE LICENSED MATERIALS SHALL + BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE LICENSED MATERIALS OR + THE USE OR OTHER DEALINGS IN THE LICENSED MATERIALS. 
From bc30c9ed4a2670bce568379cc4f743360707fd79 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 9 Feb 2024 15:33:14 +0100 Subject: [PATCH 021/286] Added `--restart` flag for `bundle run` command (#1191) ## Changes Added `--restart` flag for `bundle run` command When running with this flag, `bundle run` will cancel all existing runs before starting a new one ## Tests Manually --- bundle/run/job.go | 40 +++++++++++++++++++ bundle/run/job_test.go | 79 +++++++++++++++++++++++++++++++++++++ bundle/run/pipeline.go | 15 +++++++ bundle/run/pipeline_test.go | 49 +++++++++++++++++++++++ bundle/run/runner.go | 3 ++ cmd/bundle/run.go | 11 ++++++ 6 files changed, 197 insertions(+) create mode 100644 bundle/run/pipeline_test.go diff --git a/bundle/run/job.go b/bundle/run/job.go index a54279c11..043ea846a 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -15,6 +15,7 @@ import ( "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/fatih/color" + "golang.org/x/sync/errgroup" ) // Default timeout for waiting for a job run to complete. @@ -275,3 +276,42 @@ func (r *jobRunner) convertPythonParams(opts *Options) error { return nil } + +func (r *jobRunner) Cancel(ctx context.Context) error { + w := r.bundle.WorkspaceClient() + jobID, err := strconv.ParseInt(r.job.ID, 10, 64) + if err != nil { + return fmt.Errorf("job ID is not an integer: %s", r.job.ID) + } + + runs, err := w.Jobs.ListRunsAll(ctx, jobs.ListRunsRequest{ + ActiveOnly: true, + JobId: jobID, + }) + + if err != nil { + return err + } + + if len(runs) == 0 { + return nil + } + + errGroup, errCtx := errgroup.WithContext(ctx) + for _, run := range runs { + runId := run.RunId + errGroup.Go(func() error { + wait, err := w.Jobs.CancelRun(errCtx, jobs.CancelRun{ + RunId: runId, + }) + if err != nil { + return err + } + // Waits for the Terminated or Skipped state + _, err = wait.GetWithTimeout(jobRunTimeout) + return err + }) + } + + return errGroup.Wait() +} diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go index e4cb4e7e8..be189306b 100644 --- a/bundle/run/job_test.go +++ b/bundle/run/job_test.go @@ -1,12 +1,16 @@ package run import ( + "context" "testing" + "time" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -47,3 +51,78 @@ func TestConvertPythonParams(t *testing.T) { require.Contains(t, opts.Job.notebookParams, "__python_params") require.Equal(t, opts.Job.notebookParams["__python_params"], `["param1","param2","param3"]`) } + +func TestJobRunnerCancel(t *testing.T) { + job := &resources.Job{ + ID: "123", + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + + runner := jobRunner{key: "test", bundle: b, job: job} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + jobApi := m.GetMockJobsAPI() + jobApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + ActiveOnly: true, + JobId: 123, + }).Return([]jobs.BaseRun{ + {RunId: 1}, + {RunId: 2}, + }, nil) + + mockWait := &jobs.WaitGetRunJobTerminatedOrSkipped[struct{}]{ + Poll: func(time time.Duration, f func(j *jobs.Run)) (*jobs.Run, error) { + return nil, nil + }, + } + jobApi.EXPECT().CancelRun(mock.Anything, 
jobs.CancelRun{ + RunId: 1, + }).Return(mockWait, nil) + jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{ + RunId: 2, + }).Return(mockWait, nil) + + err := runner.Cancel(context.Background()) + require.NoError(t, err) +} + +func TestJobRunnerCancelWithNoActiveRuns(t *testing.T) { + job := &resources.Job{ + ID: "123", + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + + runner := jobRunner{key: "test", bundle: b, job: job} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + jobApi := m.GetMockJobsAPI() + jobApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + ActiveOnly: true, + JobId: 123, + }).Return([]jobs.BaseRun{}, nil) + + jobApi.AssertNotCalled(t, "CancelRun") + + err := runner.Cancel(context.Background()) + require.NoError(t, err) +} diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index 342a771b1..e1f5bfe5f 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -166,3 +166,18 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp time.Sleep(time.Second) } } + +func (r *pipelineRunner) Cancel(ctx context.Context) error { + w := r.bundle.WorkspaceClient() + wait, err := w.Pipelines.Stop(ctx, pipelines.StopRequest{ + PipelineId: r.pipeline.ID, + }) + + if err != nil { + return err + } + + // Waits for the Idle state of the pipeline + _, err = wait.GetWithTimeout(jobRunTimeout) + return err +} diff --git a/bundle/run/pipeline_test.go b/bundle/run/pipeline_test.go new file mode 100644 index 000000000..29b57ffdb --- /dev/null +++ b/bundle/run/pipeline_test.go @@ -0,0 +1,49 @@ +package run + +import ( + "context" + "testing" + "time" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/require" +) + +func TestPipelineRunnerCancel(t *testing.T) { + pipeline := &resources.Pipeline{ + ID: "123", + } + + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "test_pipeline": pipeline, + }, + }, + }, + } + + runner := pipelineRunner{key: "test", bundle: b, pipeline: pipeline} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{ + Poll: func(time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error) { + return nil, nil + }, + } + + pipelineApi := m.GetMockPipelinesAPI() + pipelineApi.EXPECT().Stop(context.Background(), pipelines.StopRequest{ + PipelineId: "123", + }).Return(mockWait, nil) + + err := runner.Cancel(context.Background()) + require.NoError(t, err) +} diff --git a/bundle/run/runner.go b/bundle/run/runner.go index 7d3c2c297..de2a1ae7a 100644 --- a/bundle/run/runner.go +++ b/bundle/run/runner.go @@ -26,6 +26,9 @@ type Runner interface { // Run the underlying worklow. Run(ctx context.Context, opts *Options) (output.RunOutput, error) + + // Cancel the underlying workflow. + Cancel(ctx context.Context) error } // Find locates a runner matching the specified argument. 
diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index a4b106588..c1a8d4ea9 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -27,7 +27,9 @@ func newRunCommand() *cobra.Command { runOptions.Define(cmd) var noWait bool + var restart bool cmd.Flags().BoolVar(&noWait, "no-wait", false, "Don't wait for the run to complete.") + cmd.Flags().BoolVar(&restart, "restart", false, "Restart the run if it is already running.") cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -68,6 +70,15 @@ func newRunCommand() *cobra.Command { } runOptions.NoWait = noWait + if restart { + s := cmdio.Spinner(ctx) + s <- "Cancelling all runs" + err := runner.Cancel(ctx) + close(s) + if err != nil { + return err + } + } output, err := runner.Run(ctx, &runOptions) if err != nil { return err From 2c752e7cb7fdc2136a1ed47c251f17c8c49de46d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 11:57:27 +0000 Subject: [PATCH 022/286] Bump github.com/hashicorp/hc-install from 0.6.2 to 0.6.3 (#1200) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/hc-install](https://github.com/hashicorp/hc-install) from 0.6.2 to 0.6.3.
Commits
  • 2c6cd56 Update VERSION to 0.6.3
  • dbd7e10 build(deps): bump golang.org/x/mod from 0.14.0 to 0.15.0 (#184)
  • 1ffdbbf deps: Bump ProtonMail/go-crypto to v1.1.0-alpha.0 (#183)
  • f2f9c0b Result of tsccr-helper -log-level=info gha update -latest . (#182)
  • 82fbd96 Result of tsccr-helper -log-level=info gha update -latest . (#180)
  • c4ced0f build(deps): bump github.com/ProtonMail/go-crypto (#179)
  • 33c1a77 Result of tsccr-helper -log-level=info gha update -latest . (#177)
  • bf0a379 deps: Replace mitchellh/cli w/ hashicorp/cli (#176)
  • 02d1410 build(deps): bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#175)
  • 946b925 go: bump version to 1.21.5 (#174)
  • Additional commits viewable in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 35 ++++++++--------------------------- 2 files changed, 11 insertions(+), 30 deletions(-) diff --git a/go.mod b/go.mod index 0d42e78e5..905458a32 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 - github.com/hashicorp/hc-install v0.6.2 // MPL 2.0 + github.com/hashicorp/hc-install v0.6.3 // MPL 2.0 github.com/hashicorp/terraform-exec v0.20.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.21.0 // MPL 2.0 github.com/imdario/mergo v0.3.15 // BSD-3-Clause @@ -22,7 +22,7 @@ require ( github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.8.4 // MIT golang.org/x/exp v0.0.0-20231006140011-7918f672742d - golang.org/x/mod v0.14.0 + golang.org/x/mod v0.15.0 golang.org/x/oauth2 v0.16.0 golang.org/x/sync v0.6.0 golang.org/x/term v0.16.0 @@ -35,7 +35,7 @@ require gopkg.in/yaml.v3 v3.0.1 require ( cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect github.com/cloudflare/circl v1.3.7 // indirect diff --git a/go.sum b/go.sum index ed864e6d2..fd111b332 100644 --- a/go.sum +++ b/go.sum @@ -8,13 +8,12 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -23,7 +22,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -52,8 +50,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= -github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -102,8 +100,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= -github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= +github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= +github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= @@ -176,8 +174,6 @@ go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -187,9 +183,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -199,9 +194,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -211,7 +203,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -227,26 +218,17 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -258,7 +240,6 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From feb20d59a470a04d7e37938b3bd8d3f05fc38cd2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 14:12:41 +0100 Subject: [PATCH 023/286] Bump golang.org/x/term from 0.16.0 to 0.17.0 (#1197) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.16.0 to 0.17.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.16.0&new-version=0.17.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 905458a32..9bd281144 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/mod v0.15.0 golang.org/x/oauth2 v0.16.0 golang.org/x/sync v0.6.0 - golang.org/x/term v0.16.0 + golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) @@ -61,7 +61,7 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/crypto v0.18.0 // indirect golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.154.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index fd111b332..ffd11ceb0 100644 --- a/go.sum +++ b/go.sum @@ -219,12 +219,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From cbf75b157dfbd96713721c03547257157e74c53b Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Mon, 12 Feb 2024 16:04:14 +0100 Subject: [PATCH 024/286] Avoid race-conditions while executing sub-commands (#1201) ## Changes `executor.Exec` now uses `cmd.CombinedOutput`. Previous implementation was hanging on my windows VM during `bundle deploy` on the `ReadAll(MultiReader(stdout, stderr))` line. The problem is related to the fact the MultiReader reads sequentially, and the `stdout` is the first in line. Even simple `io.ReadAll(stdout)` hangs on me, as it seems like the command that we spawn (python wheel build) waits for the error stream to be finished before closing stdout on its own side? Reading `stderr` (or `out`) in a separate go-routine fixes the deadlock, but `cmd.CombinedOutput` feels like a simpler solution. Also noticed that Exec was not removing `scriptFile` after itself, fixed that too. 
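For reference, here is a minimal standalone sketch of the two read strategies using plain `os/exec` (this is not the CLI's `Executor`; the `sh -c` command line is just an illustrative assumption):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"os/exec"
)

func main() {
	ctx := context.Background()
	script := "echo 'Hello' && >&2 echo 'Error'"

	// Pattern that can deadlock: io.MultiReader reads stdout to EOF before it
	// ever touches stderr. If the child blocks writing to a full stderr pipe
	// while the parent is still waiting for stdout to close, neither side can
	// make progress. (It finishes here only because the output is tiny.)
	cmd := exec.CommandContext(ctx, "sh", "-c", script)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		panic(err)
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	sequential, _ := io.ReadAll(io.MultiReader(stdout, stderr))
	_ = cmd.Wait()
	fmt.Printf("sequential: %q\n", sequential)

	// Simpler alternative: CombinedOutput attaches a single buffer to both
	// streams before starting the process, so neither stream can starve the
	// other and no extra goroutine is needed.
	combined, err := exec.CommandContext(ctx, "sh", "-c", script).CombinedOutput()
	if err != nil {
		panic(err)
	}
	fmt.Printf("combined: %q\n", combined)
}
```

With small outputs both variants complete, but only the `CombinedOutput` variant remains safe once either stream outgrows the pipe buffer.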
## Tests Unit tests and manually --- libs/exec/exec.go | 30 ++++++++++++++++-------------- libs/exec/exec_test.go | 15 ++++++++++++--- 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/libs/exec/exec.go b/libs/exec/exec.go index 9767c199a..8e4633271 100644 --- a/libs/exec/exec.go +++ b/libs/exec/exec.go @@ -90,18 +90,25 @@ func NewCommandExecutorWithExecutable(dir string, execType ExecutableType) (*Exe }, nil } -func (e *Executor) StartCommand(ctx context.Context, command string) (Command, error) { +func (e *Executor) prepareCommand(ctx context.Context, command string) (*osexec.Cmd, *execContext, error) { ec, err := e.shell.prepare(command) + if err != nil { + return nil, nil, err + } + cmd := osexec.CommandContext(ctx, ec.executable, ec.args...) + cmd.Dir = e.dir + return cmd, ec, nil +} + +func (e *Executor) StartCommand(ctx context.Context, command string) (Command, error) { + cmd, ec, err := e.prepareCommand(ctx, command) if err != nil { return nil, err } - return e.start(ctx, ec) + return e.start(ctx, cmd, ec) } -func (e *Executor) start(ctx context.Context, ec *execContext) (Command, error) { - cmd := osexec.CommandContext(ctx, ec.executable, ec.args...) - cmd.Dir = e.dir - +func (e *Executor) start(ctx context.Context, cmd *osexec.Cmd, ec *execContext) (Command, error) { stdout, err := cmd.StdoutPipe() if err != nil { return nil, err @@ -116,17 +123,12 @@ func (e *Executor) start(ctx context.Context, ec *execContext) (Command, error) } func (e *Executor) Exec(ctx context.Context, command string) ([]byte, error) { - cmd, err := e.StartCommand(ctx, command) + cmd, ec, err := e.prepareCommand(ctx, command) if err != nil { return nil, err } - - res, err := io.ReadAll(io.MultiReader(cmd.Stdout(), cmd.Stderr())) - if err != nil { - return nil, err - } - - return res, cmd.Wait() + defer os.Remove(ec.scriptFile) + return cmd.CombinedOutput() } func (e *Executor) ShellType() ExecutableType { diff --git a/libs/exec/exec_test.go b/libs/exec/exec_test.go index 0730638e3..ad54601d0 100644 --- a/libs/exec/exec_test.go +++ b/libs/exec/exec_test.go @@ -32,6 +32,15 @@ func TestExecutorWithComplexInput(t *testing.T) { assert.Equal(t, "Hello\nWorld\n", string(out)) } +func TestExecutorWithStderr(t *testing.T) { + executor, err := NewCommandExecutor(".") + assert.NoError(t, err) + out, err := executor.Exec(context.Background(), "echo 'Hello' && >&2 echo 'Error'") + assert.NoError(t, err) + assert.NotNil(t, out) + assert.Equal(t, "Hello\nError\n", string(out)) +} + func TestExecutorWithInvalidCommand(t *testing.T) { executor, err := NewCommandExecutor(".") assert.NoError(t, err) @@ -108,16 +117,16 @@ func TestExecutorCleanupsTempFiles(t *testing.T) { executor, err := NewCommandExecutor(".") assert.NoError(t, err) - ec, err := executor.shell.prepare("echo 'Hello'") + cmd, ec, err := executor.prepareCommand(context.Background(), "echo 'Hello'") assert.NoError(t, err) - cmd, err := executor.start(context.Background(), ec) + command, err := executor.start(context.Background(), cmd, ec) assert.NoError(t, err) fileName := ec.args[1] assert.FileExists(t, fileName) - err = cmd.Wait() + err = command.Wait() assert.NoError(t, err) assert.NoFileExists(t, fileName) } From 36241ee55e9708fe5519c330e102174031016aea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 15:08:35 +0000 Subject: [PATCH 025/286] Bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 (#1198) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.16.0 
to 0.17.0.
Commits
  • ebe81ad go.mod: update golang.org/x dependencies
  • adffd94 google/internal/externalaccount: update serviceAccountImpersonationRE to supp...
  • deefa7e google/downscope: add DownscopingConfig.UniverseDomain to support TPC
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/oauth2&package-manager=go_modules&previous-version=0.16.0&new-version=0.17.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 9bd281144..6a634ca28 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/stretchr/testify v1.8.4 // MIT golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/mod v0.15.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 @@ -59,8 +59,8 @@ require ( go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.154.0 // indirect diff --git a/go.sum b/go.sum index ffd11ceb0..a754df59f 100644 --- a/go.sum +++ b/go.sum @@ -174,8 +174,8 @@ go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= @@ -194,11 +194,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From aa0c715930a78ea6b5fcc71c722ae8e426360a0f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 13 Feb 2024 15:12:19 +0100 Subject: [PATCH 026/286] Retain partially valid structs in `convert.Normalize` (#1203) ## Changes Before this change, any error in a subtree would cause the entire subtree to be dropped from the output. This is not ideal when debugging, so instead we drop only the values that cannot be normalized. Note that this doesn't change behavior if the caller is properly checking the returned diagnostics for errors. Note: this includes a change to use `dyn.InvalidValue` as opposed to `dyn.NilValue` when returning errors. ## Tests Added unit tests for the case where nested struct, map, or slice elements contain an error. --- libs/dyn/convert/from_typed.go | 22 +++--- libs/dyn/convert/normalize.go | 28 ++++---- libs/dyn/convert/normalize_test.go | 106 +++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+), 25 deletions(-) diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index bd6b63670..6dcca2b85 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -59,7 +59,7 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, return fromTypedFloat(srcv, ref, options...) } - return dyn.NilValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) + return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) } func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { @@ -67,7 +67,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { switch ref.Kind() { case dyn.KindMap, dyn.KindNil: default: - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } out := make(map[string]dyn.Value) @@ -76,7 +76,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Convert the field taking into account the reference value (may be equal to config.NilValue). nv, err := fromTyped(v.Interface(), ref.Get(k)) if err != nil { - return dyn.Value{}, err + return dyn.InvalidValue, err } if nv != dyn.NilValue { @@ -92,7 +92,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { switch ref.Kind() { case dyn.KindMap, dyn.KindNil: default: - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } // Return nil if the map is nil. @@ -109,7 +109,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Convert entry taking into account the reference value (may be equal to dyn.NilValue). nv, err := fromTyped(v.Interface(), ref.Get(k), includeZeroValues) if err != nil { - return dyn.Value{}, err + return dyn.InvalidValue, err } // Every entry is represented, even if it is a nil. @@ -125,7 +125,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { switch ref.Kind() { case dyn.KindSequence, dyn.KindNil: default: - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } // Return nil if the slice is nil. 
@@ -140,7 +140,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Convert entry taking into account the reference value (may be equal to dyn.NilValue). nv, err := fromTyped(v.Interface(), ref.Index(i), includeZeroValues) if err != nil { - return dyn.Value{}, err + return dyn.InvalidValue, err } out[i] = nv @@ -167,7 +167,7 @@ func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptio return dyn.V(src.String()), nil } - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } func fromTypedBool(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -187,7 +187,7 @@ func fromTypedBool(src reflect.Value, ref dyn.Value, options ...fromTypedOptions return dyn.V(src.Bool()), nil } - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } func fromTypedInt(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -207,7 +207,7 @@ func fromTypedInt(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) return dyn.V(src.Int()), nil } - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } func fromTypedFloat(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -227,5 +227,5 @@ func fromTypedFloat(src reflect.Value, ref dyn.Value, options ...fromTypedOption return dyn.V(src.Float()), nil } - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 7a652cbc7..5595aae1e 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -35,7 +35,7 @@ func normalizeType(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics return normalizeFloat(typ, src) } - return dyn.NilValue, diag.Errorf("unsupported type: %s", typ.Kind()) + return dyn.InvalidValue, diag.Errorf("unsupported type: %s", typ.Kind()) } func typeMismatch(expected dyn.Kind, src dyn.Value) diag.Diagnostic { @@ -69,7 +69,7 @@ func normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnosti if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. - if err.HasError() { + if !v.IsValid() { continue } } @@ -82,7 +82,7 @@ func normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnosti return src, diags } - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindMap, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src)) } func normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { @@ -97,7 +97,7 @@ func normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. 
- if err.HasError() { + if !v.IsValid() { continue } } @@ -110,7 +110,7 @@ func normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) return src, diags } - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindMap, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src)) } func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { @@ -125,7 +125,7 @@ func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostic if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. - if err.HasError() { + if !v.IsValid() { continue } } @@ -138,7 +138,7 @@ func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostic return src, diags } - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindSequence, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindSequence, src)) } func normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { @@ -155,7 +155,7 @@ func normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnosti case dyn.KindFloat: out = strconv.FormatFloat(src.MustFloat(), 'f', -1, 64) default: - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindString, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindString, src)) } return dyn.NewValue(out, src.Location()), diags @@ -177,10 +177,10 @@ func normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics out = false default: // Cannot interpret as a boolean. - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindBool, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src)) } default: - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindBool, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src)) } return dyn.NewValue(out, src.Location()), diags @@ -197,14 +197,14 @@ func normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) var err error out, err = strconv.ParseInt(src.MustString(), 10, 64) if err != nil { - return dyn.NilValue, diags.Append(diag.Diagnostic{ + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ Severity: diag.Error, Summary: fmt.Sprintf("cannot parse %q as an integer", src.MustString()), Location: src.Location(), }) } default: - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindInt, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindInt, src)) } return dyn.NewValue(out, src.Location()), diags @@ -221,14 +221,14 @@ func normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostic var err error out, err = strconv.ParseFloat(src.MustString(), 64) if err != nil { - return dyn.NilValue, diags.Append(diag.Diagnostic{ + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ Severity: diag.Error, Summary: fmt.Sprintf("cannot parse %q as a floating point number", src.MustString()), Location: src.Location(), }) } default: - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindFloat, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindFloat, src)) } return dyn.NewValue(out, src.Location()), diags diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 13b1ed52f..702816155 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -104,6 +104,44 @@ func TestNormalizeStructError(t *testing.T) { }, err[0]) } +func TestNormalizeStructNestedError(t *testing.T) { + type Nested struct { + F1 int `json:"f1"` + F2 int `json:"f2"` + } 
+ type Tmp struct { + Foo Nested `json:"foo"` + Bar Nested `json:"bar"` + } + + var typ Tmp + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "f1": dyn.V("error"), + "f2": dyn.V(1), + }), + "bar": dyn.V(map[string]dyn.Value{ + "f1": dyn.V(1), + "f2": dyn.V("error"), + }), + }) + vout, err := Normalize(typ, vin) + assert.Len(t, err, 2) + + // Verify that valid fields are retained. + assert.Equal(t, + dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "f2": dyn.V(int64(1)), + }), + "bar": dyn.V(map[string]dyn.Value{ + "f1": dyn.V(int64(1)), + }), + }), + vout, + ) +} + func TestNormalizeMap(t *testing.T) { var typ map[string]string vin := dyn.V(map[string]dyn.Value{ @@ -157,6 +195,40 @@ func TestNormalizeMapError(t *testing.T) { }, err[0]) } +func TestNormalizeMapNestedError(t *testing.T) { + type Nested struct { + F1 int `json:"f1"` + F2 int `json:"f2"` + } + + var typ map[string]Nested + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "f1": dyn.V("error"), + "f2": dyn.V(1), + }), + "bar": dyn.V(map[string]dyn.Value{ + "f1": dyn.V(1), + "f2": dyn.V("error"), + }), + }) + vout, err := Normalize(typ, vin) + assert.Len(t, err, 2) + + // Verify that valid fields are retained. + assert.Equal(t, + dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "f2": dyn.V(int64(1)), + }), + "bar": dyn.V(map[string]dyn.Value{ + "f1": dyn.V(int64(1)), + }), + }), + vout, + ) +} + func TestNormalizeSlice(t *testing.T) { var typ []string vin := dyn.V([]dyn.Value{ @@ -209,6 +281,40 @@ func TestNormalizeSliceError(t *testing.T) { }, err[0]) } +func TestNormalizeSliceNestedError(t *testing.T) { + type Nested struct { + F1 int `json:"f1"` + F2 int `json:"f2"` + } + + var typ []Nested + vin := dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "f1": dyn.V("error"), + "f2": dyn.V(1), + }), + dyn.V(map[string]dyn.Value{ + "f1": dyn.V(1), + "f2": dyn.V("error"), + }), + }) + vout, err := Normalize(typ, vin) + assert.Len(t, err, 2) + + // Verify that valid fields are retained. + assert.Equal(t, + dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "f2": dyn.V(int64(1)), + }), + dyn.V(map[string]dyn.Value{ + "f1": dyn.V(int64(1)), + }), + }), + vout, + ) +} + func TestNormalizeString(t *testing.T) { var typ string vin := dyn.V("string") From 52b813bd8eea28027f2d2c034ae0e2969874b213 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 13 Feb 2024 19:43:47 +0530 Subject: [PATCH 027/286] Skip `for_each_task` when generating the bundle schema (#1204) ## Changes Bundle schema generation does not support recursive API fields. This PR skips generation for for_each_task until we add proper support for recursive types in the bundle schema. ## Tests Manually. This fixes the generation of the CLI and the bundle schema command works as expected, with the sub-schema for `for_each_task` being set to null in the output. ``` "for_each_task": null, ``` --- bundle/schema/openapi.go | 7 +++++++ bundle/schema/schema.go | 6 ++++++ 2 files changed, 13 insertions(+) diff --git a/bundle/schema/openapi.go b/bundle/schema/openapi.go index 0b64c43e3..fe329e7ac 100644 --- a/bundle/schema/openapi.go +++ b/bundle/schema/openapi.go @@ -71,6 +71,13 @@ func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *t return reader.traverseSchema(root, tracker) } key := *root.Reference + + // HACK to unblock CLI release (13th Feb 2024). 
This is temporary until proper + // support for recursive types is added to the docs generator. PR: https://github.com/databricks/cli/pull/1204 + if strings.Contains(key, "ForEachTask") { + return root, nil + } + if tracker.hasCycle(key) { // self reference loops can be supported however the logic is non-trivial because // cross refernce loops are not allowed (see: http://json-schema.org/understanding-json-schema/structuring.html#recursion) diff --git a/bundle/schema/schema.go b/bundle/schema/schema.go index 8b5c36d12..7153f38f6 100644 --- a/bundle/schema/schema.go +++ b/bundle/schema/schema.go @@ -92,6 +92,12 @@ func jsonSchemaType(golangType reflect.Type) (jsonschema.Type, error) { // // - tracker: Keeps track of types / traceIds seen during recursive traversal func safeToSchema(golangType reflect.Type, docs *Docs, traceId string, tracker *tracker) (*jsonschema.Schema, error) { + // HACK to unblock CLI release (13th Feb 2024). This is temporary until proper + // support for recursive types is added to the schema generator. PR: https://github.com/databricks/cli/pull/1204 + if traceId == "for_each_task" { + return nil, nil + } + // WE ERROR OUT IF THERE ARE CYCLES IN THE JSON SCHEMA // There are mechanisms to deal with cycles though recursive identifiers in json // schema. However if we use them, we would need to make sure we are able to detect From e8b0698e19cd9718ba89326ef7e8d7b84de7fb9f Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Tue, 13 Feb 2024 15:33:59 +0100 Subject: [PATCH 028/286] Regenerate the CLI using the same OpenAPI spec as the SDK (#1205) ## Changes The OpenAPI spec used to generate the CLI doesn't match the version used for the SDK version that the CLI currently depends on. This PR regenerates the CLI based on the same version of the OpenAPI spec used by the SDK on v0.30.1. 
## Tests --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + bundle/schema/docs/bundle_descriptions.json | 166 ++++++- .../cluster-policies/cluster-policies.go | 9 +- cmd/workspace/clusters/clusters.go | 5 +- cmd/workspace/cmd.go | 2 + cmd/workspace/dashboards/dashboards.go | 95 +++- cmd/workspace/experiments/experiments.go | 8 +- .../global-init-scripts.go | 2 +- .../lakehouse-monitors/lakehouse-monitors.go | 414 ++++++++++++++++++ cmd/workspace/pipelines/pipelines.go | 1 + cmd/workspace/queries/queries.go | 6 +- cmd/workspace/tables/tables.go | 93 +++- .../token-management/token-management.go | 53 ++- 14 files changed, 810 insertions(+), 47 deletions(-) create mode 100755 cmd/workspace/lakehouse-monitors/lakehouse-monitors.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 56c8253ff..f705ffea6 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -a7a9dc025bb80303e676bf3708942c6aa06689f1 \ No newline at end of file +e05401ed5dd4974c5333d737ec308a7d451f749f \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 7a1750caa..fe33227a7 100755 --- a/.gitattributes +++ b/.gitattributes @@ -50,6 +50,7 @@ cmd/workspace/instance-pools/instance-pools.go linguist-generated=true cmd/workspace/instance-profiles/instance-profiles.go linguist-generated=true cmd/workspace/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/workspace/jobs/jobs.go linguist-generated=true +cmd/workspace/lakehouse-monitors/lakehouse-monitors.go linguist-generated=true cmd/workspace/lakeview/lakeview.go linguist-generated=true cmd/workspace/libraries/libraries.go linguist-generated=true cmd/workspace/metastores/metastores.go linguist-generated=true diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 8d16970c5..fb28247ac 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -9,6 +9,9 @@ "build": { "description": "" }, + "executable": { + "description": "" + }, "files": { "description": "", "items": { @@ -35,6 +38,14 @@ "compute_id": { "description": "" }, + "deployment": { + "description": "", + "properties": { + "fail_on_active_runs": { + "description": "" + } + } + }, "git": { "description": "", "properties": { @@ -803,7 +814,7 @@ } }, "existing_cluster_id": { - "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." }, "health": { "description": "", @@ -1210,7 +1221,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the Python file. 
When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -1312,7 +1323,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -2093,6 +2104,72 @@ } } }, + "init_scripts": { + "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "items": { + "description": "", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "file": { + "description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`", + "properties": { + "destination": { + "description": "local file destination, e.g. `file:/my/local/file.sh`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. 
e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + }, + "volumes": { + "description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`" + } + } + }, + "workspace": { + "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`" + } + } + } + } + } + }, "instance_pool_id": { "description": "The optional ID of the instance pool to which the cluster belongs." }, @@ -2368,6 +2445,9 @@ "build": { "description": "" }, + "executable": { + "description": "" + }, "files": { "description": "", "items": { @@ -2394,6 +2474,14 @@ "compute_id": { "description": "" }, + "deployment": { + "description": "", + "properties": { + "fail_on_active_runs": { + "description": "" + } + } + }, "git": { "description": "", "properties": { @@ -3162,7 +3250,7 @@ } }, "existing_cluster_id": { - "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. 
Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." }, "health": { "description": "", @@ -3569,7 +3657,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -3671,7 +3759,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -4452,6 +4540,72 @@ } } }, + "init_scripts": { + "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "items": { + "description": "", + "properties": { + "dbfs": { + "description": "destination needs to be provided. 
e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "file": { + "description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`", + "properties": { + "destination": { + "description": "local file destination, e.g. `file:/my/local/file.sh`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + }, + "volumes": { + "description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`" + } + } + }, + "workspace": { + "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`" + } + } + } + } + } + }, "instance_pool_id": { "description": "The optional ID of the instance pool to which the cluster belongs." 
}, diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 339e87c4f..65f1af57b 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -26,10 +26,11 @@ func New() *cobra.Command { limit their use to specific users and groups. With cluster policies, you can: - Auto-install cluster libraries on the next - restart by listing them in the policy's "libraries" field. - Limit users to - creating clusters with the prescribed settings. - Simplify the user interface, - enabling more users to create clusters, by fixing and hiding some fields. - - Manage costs by setting limits on attributes that impact the hourly rate. + restart by listing them in the policy's "libraries" field (Public Preview). - + Limit users to creating clusters with the prescribed settings. - Simplify the + user interface, enabling more users to create clusters, by fixing and hiding + some fields. - Manage costs by setting limits on attributes that impact the + hourly rate. Cluster policy permissions limit which policies a user can select in the Policy drop-down when the user creates a cluster: - A user who has diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index d159ffd7b..b009a1f59 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -84,8 +84,9 @@ func newChangeOwner() *cobra.Command { cmd.Short = `Change cluster owner.` cmd.Long = `Change cluster owner. - Change the owner of the cluster. You must be an admin to perform this - operation. + Change the owner of the cluster. You must be an admin and the cluster must be + terminated to perform this operation. The service principal application ID can + be supplied as an argument to owner_username. Arguments: CLUSTER_ID: diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index f8e911d1f..47ad795e6 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -27,6 +27,7 @@ import ( instance_profiles "github.com/databricks/cli/cmd/workspace/instance-profiles" ip_access_lists "github.com/databricks/cli/cmd/workspace/ip-access-lists" jobs "github.com/databricks/cli/cmd/workspace/jobs" + lakehouse_monitors "github.com/databricks/cli/cmd/workspace/lakehouse-monitors" lakeview "github.com/databricks/cli/cmd/workspace/lakeview" libraries "github.com/databricks/cli/cmd/workspace/libraries" metastores "github.com/databricks/cli/cmd/workspace/metastores" @@ -93,6 +94,7 @@ func All() []*cobra.Command { out = append(out, instance_profiles.New()) out = append(out, ip_access_lists.New()) out = append(out, jobs.New()) + out = append(out, lakehouse_monitors.New()) out = append(out, lakeview.New()) out = append(out, libraries.New()) out = append(out, metastores.New()) diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index cd3227af9..0cd758189 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -122,7 +122,7 @@ func newDelete() *cobra.Command { cmd.Use = "delete DASHBOARD_ID" cmd.Short = `Remove a dashboard.` cmd.Long = `Remove a dashboard. - + Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared.` @@ -196,7 +196,7 @@ func newGet() *cobra.Command { cmd.Use = "get DASHBOARD_ID" cmd.Short = `Retrieve a definition.` cmd.Long = `Retrieve a definition. 
- + Returns a JSON representation of a dashboard object, including its visualization and query objects.` @@ -275,7 +275,7 @@ func newList() *cobra.Command { cmd.Use = "list" cmd.Short = `Get dashboard objects.` cmd.Long = `Get dashboard objects. - + Fetch a paginated list of dashboard objects.` cmd.Annotations = make(map[string]string) @@ -334,7 +334,7 @@ func newRestore() *cobra.Command { cmd.Use = "restore DASHBOARD_ID" cmd.Short = `Restore a dashboard.` cmd.Long = `Restore a dashboard. - + A restored dashboard appears in list views and searches and can be shared.` cmd.Annotations = make(map[string]string) @@ -388,4 +388,91 @@ func init() { }) } +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *sql.DashboardEditContent, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq sql.DashboardEditContent + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this dashboard that appears in list views and at the top of the dashboard page.`) + cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`) + + cmd.Use = "update DASHBOARD_ID" + cmd.Short = `Change a dashboard definition.` + cmd.Long = `Change a dashboard definition. + + Modify this dashboard definition. This operation only affects attributes of + the dashboard object. It does not add, modify, or remove widgets. + + **Note**: You cannot undo this operation.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No DASHBOARD_ID argument specified. Loading names for Dashboards drop-down." + names, err := w.Dashboards.DashboardNameToIdMap(ctx, sql.ListDashboardsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Dashboards drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + updateReq.DashboardId = args[0] + + response, err := w.Dashboards.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdate()) + }) +} + // end service Dashboards diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 79828714c..7bd28938c 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -397,7 +397,9 @@ func newDeleteRuns() *cobra.Command { cmd.Long = `Delete runs by creation time. Bulk delete runs in an experiment that were created prior to or at the - specified timestamp. Deletes at most max_runs per request. + specified timestamp. Deletes at most max_runs per request. To call this API + from a Databricks Notebook in Python, you can use the client code snippet on + https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. Arguments: EXPERIMENT_ID: The ID of the experiment containing the runs to delete. @@ -1721,7 +1723,9 @@ func newRestoreRuns() *cobra.Command { cmd.Long = `Restore runs by deletion time. Bulk restore runs in an experiment that were deleted no earlier than the - specified timestamp. Restores at most max_runs per request. + specified timestamp. Restores at most max_runs per request. To call this API + from a Databricks Notebook in Python, you can use the client code snippet on + https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. Arguments: EXPERIMENT_ID: The ID of the experiment containing the runs to restore. diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index 1479381da..c40b6785a 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -301,7 +301,7 @@ func newList() *cobra.Command { Get a list of all global init scripts for this workspace. This returns all properties for each script but **not** the script contents. To retrieve the contents of a script, use the [get a global init - script](#operation/get-script) operation.` + script](:method:globalinitscripts/get) operation.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go new file mode 100755 index 000000000..3a644b933 --- /dev/null +++ b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go @@ -0,0 +1,414 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package lakehouse_monitors + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "lakehouse-monitors", + Short: `A monitor computes and monitors data or model quality metrics for a table over time.`, + Long: `A monitor computes and monitors data or model quality metrics for a table over + time. It generates metrics tables and a dashboard that you can use to monitor + table health and set alerts. 
+ + Most write operations require the user to be the owner of the table (or its + parent schema or parent catalog). Viewing the dashboard, computed metrics, or + monitor configuration only requires the user to have **SELECT** privileges on + the table (along with **USE_SCHEMA** and **USE_CATALOG**).`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *catalog.CreateMonitor, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq catalog.CreateMonitor + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.BaselineTableName, "baseline-table-name", createReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`) + // TODO: array: custom_metrics + // TODO: complex arg: data_classification_config + // TODO: complex arg: inference_log + // TODO: array: notifications + // TODO: complex arg: schedule + cmd.Flags().BoolVar(&createReq.SkipBuiltinDashboard, "skip-builtin-dashboard", createReq.SkipBuiltinDashboard, `Whether to skip creating a default dashboard summarizing data quality metrics.`) + // TODO: array: slicing_exprs + // TODO: output-only field + // TODO: complex arg: time_series + cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `Optional argument to specify the warehouse for dashboard creation.`) + + cmd.Use = "create FULL_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME" + cmd.Short = `Create a table monitor.` + cmd.Long = `Create a table monitor. + + Creates a new monitor for the specified table. + + The caller must either: 1. be an owner of the table's parent catalog, have + **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the + table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of + the table's parent schema, and have **SELECT** access on the table. 3. have + the following permissions: - **USE_CATALOG** on the table's parent catalog - + **USE_SCHEMA** on the table's parent schema - be an owner of the table. + + Workspace assets, such as the dashboard, will be created in the workspace + where this call was made. + + Arguments: + FULL_NAME: Full name of the table. + ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables). + OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := cobra.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. 
Provide 'assets_dir', 'output_schema_name' in your JSON input") + } + return nil + } + check := cobra.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + createReq.FullName = args[0] + if !cmd.Flags().Changed("json") { + createReq.AssetsDir = args[1] + } + if !cmd.Flags().Changed("json") { + createReq.OutputSchemaName = args[2] + } + + response, err := w.LakehouseMonitors.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newCreate()) + }) +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *catalog.DeleteLakehouseMonitorRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq catalog.DeleteLakehouseMonitorRequest + + // TODO: short flags + + cmd.Use = "delete FULL_NAME" + cmd.Short = `Delete a table monitor.` + cmd.Long = `Delete a table monitor. + + Deletes a monitor for the specified table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table. + + Additionally, the call must be made from the workspace where the monitor was + created. + + Note that the metric tables and dashboard will not be deleted as part of this + call; those assets must be manually cleaned up (if desired). + + Arguments: + FULL_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.FullName = args[0] + + err = w.LakehouseMonitors.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDelete()) + }) +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *catalog.GetLakehouseMonitorRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq catalog.GetLakehouseMonitorRequest + + // TODO: short flags + + cmd.Use = "get FULL_NAME" + cmd.Short = `Get a table monitor.` + cmd.Long = `Get a table monitor. + + Gets a monitor for the specified table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema. 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + The returned information includes configuration values, as well as information + on assets created by the monitor. Some information (e.g., dashboard) may be + filtered out if the caller is in a different workspace than where the monitor + was created. + + Arguments: + FULL_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.FullName = args[0] + + response, err := w.LakehouseMonitors.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGet()) + }) +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *catalog.UpdateMonitor, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq catalog.UpdateMonitor + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.BaselineTableName, "baseline-table-name", updateReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`) + // TODO: array: custom_metrics + // TODO: complex arg: data_classification_config + // TODO: complex arg: inference_log + // TODO: array: notifications + // TODO: complex arg: schedule + // TODO: array: slicing_exprs + // TODO: output-only field + // TODO: complex arg: time_series + + cmd.Use = "update FULL_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME" + cmd.Short = `Update a table monitor.` + cmd.Long = `Update a table monitor. + + Updates a monitor for the specified table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table. 
+ + Additionally, the call must be made from the workspace where the monitor was + created, and the caller must be the original creator of the monitor. + + Certain configuration fields, such as output asset identifiers, cannot be + updated. + + Arguments: + FULL_NAME: Full name of the table. + ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables). + OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := cobra.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. Provide 'assets_dir', 'output_schema_name' in your JSON input") + } + return nil + } + check := cobra.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.FullName = args[0] + if !cmd.Flags().Changed("json") { + updateReq.AssetsDir = args[1] + } + if !cmd.Flags().Changed("json") { + updateReq.OutputSchemaName = args[2] + } + + response, err := w.LakehouseMonitors.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdate()) + }) +} + +// end service LakehouseMonitors diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index d35eb3cd8..488977100 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -911,6 +911,7 @@ func newStartUpdate() *cobra.Command { cmd.Flags().BoolVar(&startUpdateReq.FullRefresh, "full-refresh", startUpdateReq.FullRefresh, `If true, this update will reset all tables before running.`) // TODO: array: full_refresh_selection // TODO: array: refresh_selection + cmd.Flags().BoolVar(&startUpdateReq.ValidateOnly, "validate-only", startUpdateReq.ValidateOnly, `If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets.`) cmd.Use = "start-update PIPELINE_ID" cmd.Short = `Start a pipeline.` diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index 38fa9c0c5..c4349213e 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -286,7 +286,10 @@ func newList() *cobra.Command { cmd.Long = `Get a list of queries. Gets a list of queries. Optionally, this list can be filtered by a search - term.` + term. 
+ + ### **Warning: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban.**` cmd.Annotations = make(map[string]string) @@ -422,6 +425,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this query that appears in list views, widget headings, and on the query page.`) // TODO: any: options cmd.Flags().StringVar(&updateReq.Query, "query", updateReq.Query, `The text of the query to be run.`) + cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`) cmd.Use = "update QUERY_ID" cmd.Short = `Change a query definition.` diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index e655dfd7c..0dfae0fef 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -123,6 +123,89 @@ func init() { }) } +// start exists command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var existsOverrides []func( + *cobra.Command, + *catalog.ExistsRequest, +) + +func newExists() *cobra.Command { + cmd := &cobra.Command{} + + var existsReq catalog.ExistsRequest + + // TODO: short flags + + cmd.Use = "exists FULL_NAME" + cmd.Short = `Get boolean reflecting if table exists.` + cmd.Long = `Get boolean reflecting if table exists. + + Gets if a table exists in the metastore for a specific catalog and schema. The + caller must satisfy one of the following requirements: * Be a metastore admin + * Be the owner of the parent catalog * Be the owner of the parent schema and + have the USE_CATALOG privilege on the parent catalog * Have the + **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + privilege on the parent schema, and either be the table owner or have the + SELECT privilege on the table. * Have BROWSE privilege on the parent catalog * + Have BROWSE privilege on the parent schema. + + Arguments: + FULL_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FULL_NAME argument specified. Loading names for Tables drop-down." + names, err := w.Tables.TableInfoNameToTableIdMap(ctx, catalog.ListTablesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Tables drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Full name of the table") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have full name of the table") + } + existsReq.FullName = args[0] + + response, err := w.Tables.Exists(ctx, existsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range existsOverrides { + fn(cmd, &existsReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newExists()) + }) +} + // start get command // Slice with functions to override default command behavior. @@ -146,10 +229,12 @@ func newGet() *cobra.Command { cmd.Long = `Get a table. Gets a table from the metastore for a specific catalog and schema. The caller - must be a metastore admin, be the owner of the table and have the - **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** - privilege on the parent schema, or be the owner of the table and have the - **SELECT** privilege on it as well. + must satisfy one of the following requirements: * Be a metastore admin * Be + the owner of the parent catalog * Be the owner of the parent schema and have + the USE_CATALOG privilege on the parent catalog * Have the **USE_CATALOG** + privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent + schema, and either be the table owner or have the SELECT privilege on the + table. Arguments: FULL_NAME: Full name of the table.` diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index 956555b6d..276de6a8e 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -56,16 +56,16 @@ func newCreateOboToken() *cobra.Command { cmd.Flags().Var(&createOboTokenJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createOboTokenReq.Comment, "comment", createOboTokenReq.Comment, `Comment that describes the purpose of the token.`) + cmd.Flags().Int64Var(&createOboTokenReq.LifetimeSeconds, "lifetime-seconds", createOboTokenReq.LifetimeSeconds, `The number of seconds before the token expires.`) - cmd.Use = "create-obo-token APPLICATION_ID LIFETIME_SECONDS" + cmd.Use = "create-obo-token APPLICATION_ID" cmd.Short = `Create on-behalf token.` cmd.Long = `Create on-behalf token. - + Creates a token on behalf of a service principal. Arguments: - APPLICATION_ID: Application ID of the service principal. - LIFETIME_SECONDS: The number of seconds before the token expires.` + APPLICATION_ID: Application ID of the service principal.` cmd.Annotations = make(map[string]string) @@ -73,12 +73,11 @@ func newCreateOboToken() *cobra.Command { if cmd.Flags().Changed("json") { err := cobra.ExactArgs(0)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'application_id', 'lifetime_seconds' in your JSON input") + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'application_id' in your JSON input") } return nil } - check := cobra.ExactArgs(2) - return check(cmd, args) + return nil } cmd.PreRunE = root.MustWorkspaceClient @@ -91,15 +90,25 @@ func newCreateOboToken() *cobra.Command { if err != nil { return err } - } - if !cmd.Flags().Changed("json") { - createOboTokenReq.ApplicationId = args[0] - } - if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &createOboTokenReq.LifetimeSeconds) - if err != nil { - return fmt.Errorf("invalid LIFETIME_SECONDS: %s", args[1]) + } else { + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No APPLICATION_ID argument specified. Loading names for Token Management drop-down." 
+ names, err := w.TokenManagement.TokenInfoCommentToTokenIdMap(ctx, settings.ListTokenManagementRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Token Management drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Application ID of the service principal") + if err != nil { + return err + } + args = append(args, id) } + if len(args) != 1 { + return fmt.Errorf("expected to have application id of the service principal") + } + createOboTokenReq.ApplicationId = args[0] } response, err := w.TokenManagement.CreateOboToken(ctx, createOboTokenReq) @@ -146,7 +155,7 @@ func newDelete() *cobra.Command { cmd.Use = "delete TOKEN_ID" cmd.Short = `Delete a token.` cmd.Long = `Delete a token. - + Deletes a token, specified by its ID. Arguments: @@ -222,7 +231,7 @@ func newGet() *cobra.Command { cmd.Use = "get TOKEN_ID" cmd.Short = `Get token info.` cmd.Long = `Get token info. - + Gets information about a token, specified by its ID. Arguments: @@ -293,7 +302,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Use = "get-permission-levels" cmd.Short = `Get token permission levels.` cmd.Long = `Get token permission levels. - + Gets the permission levels that a user can have on an object.` cmd.Annotations = make(map[string]string) @@ -341,7 +350,7 @@ func newGetPermissions() *cobra.Command { cmd.Use = "get-permissions" cmd.Short = `Get token permissions.` cmd.Long = `Get token permissions. - + Gets the permissions of all tokens. Tokens can inherit permissions from their root object.` @@ -398,7 +407,7 @@ func newList() *cobra.Command { cmd.Use = "list" cmd.Short = `List all tokens.` cmd.Long = `List all tokens. - + Lists all tokens associated with the specified workspace or user.` cmd.Annotations = make(map[string]string) @@ -461,7 +470,7 @@ func newSetPermissions() *cobra.Command { cmd.Use = "set-permissions" cmd.Short = `Set token permissions.` cmd.Long = `Set token permissions. - + Sets permissions on all tokens. Tokens can inherit permissions from their root object.` @@ -532,7 +541,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Use = "update-permissions" cmd.Short = `Update token permissions.` cmd.Long = `Update token permissions. - + Updates the permissions on all tokens. Tokens can inherit permissions from their root object.` From 80670eceed0e6cf786f0b9824291fdd15a5ddb23 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 14 Feb 2024 19:04:45 +0100 Subject: [PATCH 029/286] Added `bundle deployment bind` and `unbind` command (#1131) ## Changes Added `bundle deployment bind` and `unbind` command. This command allows to bind bundle-defined resources to existing resources in Databricks workspace so they become DABs-managed. 
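
For example, to take over an existing job with a bundle-defined resource and later detach it again (the resource key `my_job` and job ID `123` below are illustrative, not taken from the test fixtures):

```
databricks bundle deployment bind my_job 123 --auto-approve
databricks bundle deploy
databricks bundle deployment unbind my_job
```

Binding imports the existing resource into the bundle's Terraform state, so the next `bundle deploy` updates it in place; unbinding only removes it from state and leaves the remote resource untouched.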
## Tests Manually + added E2E test --- bundle/config/resources.go | 35 ++++ bundle/config/resources/job.go | 24 +++ bundle/config/resources/pipeline.go | 18 ++ bundle/deploy/lock/release.go | 4 + bundle/deploy/terraform/import.go | 108 ++++++++++ bundle/deploy/terraform/unbind.go | 41 ++++ bundle/phases/bind.go | 45 +++++ bundle/phases/destroy.go | 2 +- cmd/bundle/bundle.go | 2 + cmd/bundle/deploy.go | 3 +- cmd/bundle/deployment/bind.go | 65 ++++++ cmd/bundle/deployment/deployment.go | 17 ++ cmd/bundle/deployment/unbind.go | 37 ++++ cmd/bundle/destroy.go | 3 +- cmd/bundle/generate.go | 3 +- cmd/bundle/run.go | 3 +- cmd/bundle/summary.go | 3 +- cmd/bundle/sync.go | 3 +- cmd/bundle/utils/utils.go | 24 +++ cmd/bundle/validate.go | 3 +- cmd/bundle/variables.go | 19 -- internal/bundle/bind_resource_test.go | 185 ++++++++++++++++++ .../template/databricks.yml.tmpl | 4 +- internal/bundle/generate_pipeline_test.go | 18 +- internal/helpers.go | 8 + 25 files changed, 643 insertions(+), 34 deletions(-) create mode 100644 bundle/deploy/terraform/import.go create mode 100644 bundle/deploy/terraform/unbind.go create mode 100644 bundle/phases/bind.go create mode 100644 cmd/bundle/deployment/bind.go create mode 100644 cmd/bundle/deployment/deployment.go create mode 100644 cmd/bundle/deployment/unbind.go create mode 100644 cmd/bundle/utils/utils.go create mode 100644 internal/bundle/bind_resource_test.go diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 2b453c666..d0b64d1a3 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -1,9 +1,11 @@ package config import ( + "context" "fmt" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go" ) // Resources defines Databricks resources associated with the bundle. 
@@ -168,3 +170,36 @@ func (r *Resources) Merge() error { } return nil } + +type ConfigResource interface { + Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) + TerraformResourceName() string +} + +func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) { + found := make([]ConfigResource, 0) + for k := range r.Jobs { + if k == key { + found = append(found, r.Jobs[k]) + } + } + for k := range r.Pipelines { + if k == key { + found = append(found, r.Pipelines[k]) + } + } + + if len(found) == 0 { + return nil, fmt.Errorf("no such resource: %s", key) + } + + if len(found) > 1 { + keys := make([]string, 0, len(found)) + for _, r := range found { + keys = append(keys, fmt.Sprintf("%s:%s", r.TerraformResourceName(), key)) + } + return nil, fmt.Errorf("ambiguous: %s (can resolve to all of %s)", key, keys) + } + + return found[0], nil +} diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index bd43ed0af..da85f94dc 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -1,7 +1,12 @@ package resources import ( + "context" + "strconv" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/imdario/mergo" @@ -90,3 +95,22 @@ func (j *Job) MergeTasks() error { j.Tasks = tasks return nil } + +func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + jobId, err := strconv.Atoi(id) + if err != nil { + return false, err + } + _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: int64(jobId), + }) + if err != nil { + log.Debugf(ctx, "job %s does not exist", id) + return false, err + } + return true, nil +} + +func (j *Job) TerraformResourceName() string { + return "databricks_job" +} diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index 43450dc49..97aeef156 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -1,9 +1,12 @@ package resources import ( + "context" "strings" "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/imdario/mergo" @@ -73,3 +76,18 @@ func (p *Pipeline) MergeClusters() error { p.Clusters = output return nil } + +func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.Pipelines.Get(ctx, pipelines.GetPipelineRequest{ + PipelineId: id, + }) + if err != nil { + log.Debugf(ctx, "pipeline %s does not exist", id) + return false, err + } + return true, nil +} + +func (p *Pipeline) TerraformResourceName() string { + return "databricks_pipeline" +} diff --git a/bundle/deploy/lock/release.go b/bundle/deploy/lock/release.go index 68d4e0f93..4ea47c2f9 100644 --- a/bundle/deploy/lock/release.go +++ b/bundle/deploy/lock/release.go @@ -12,6 +12,8 @@ import ( type Goal string const ( + GoalBind = Goal("bind") + GoalUnbind = Goal("unbind") GoalDeploy = Goal("deploy") GoalDestroy = Goal("destroy") ) @@ -46,6 +48,8 @@ func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error { switch m.goal { case GoalDeploy: return b.Locker.Unlock(ctx) + case GoalBind, GoalUnbind: + return b.Locker.Unlock(ctx) case GoalDestroy: 
return b.Locker.Unlock(ctx, locker.AllowLockFileNotExist) default: diff --git a/bundle/deploy/terraform/import.go b/bundle/deploy/terraform/import.go new file mode 100644 index 000000000..5fc436f20 --- /dev/null +++ b/bundle/deploy/terraform/import.go @@ -0,0 +1,108 @@ +package terraform + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/cmdio" + "github.com/hashicorp/terraform-exec/tfexec" +) + +type BindOptions struct { + AutoApprove bool + ResourceType string + ResourceKey string + ResourceId string +} + +type importResource struct { + opts *BindOptions +} + +// Apply implements bundle.Mutator. +func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) error { + dir, err := Dir(ctx, b) + if err != nil { + return err + } + + tf := b.Terraform + if tf == nil { + return fmt.Errorf("terraform not initialized") + } + + err = tf.Init(ctx, tfexec.Upgrade(true)) + if err != nil { + return fmt.Errorf("terraform init: %w", err) + } + tmpDir, err := os.MkdirTemp("", "state-*") + if err != nil { + return fmt.Errorf("terraform init: %w", err) + } + tmpState := filepath.Join(tmpDir, TerraformStateFileName) + + importAddress := fmt.Sprintf("%s.%s", m.opts.ResourceType, m.opts.ResourceKey) + err = tf.Import(ctx, importAddress, m.opts.ResourceId, tfexec.StateOut(tmpState)) + if err != nil { + return fmt.Errorf("terraform import: %w", err) + } + + buf := bytes.NewBuffer(nil) + tf.SetStdout(buf) + + //lint:ignore SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file + changed, err := tf.Plan(ctx, tfexec.State(tmpState), tfexec.Target(importAddress)) + if err != nil { + return fmt.Errorf("terraform plan: %w", err) + } + + defer os.RemoveAll(tmpDir) + + if changed && !m.opts.AutoApprove { + output := buf.String() + // Remove output starting from Warning until end of output + output = output[:bytes.Index([]byte(output), []byte("Warning:"))] + cmdio.LogString(ctx, output) + ans, err := cmdio.AskYesOrNo(ctx, "Confirm import changes? Changes will be remotely applied only after running 'bundle deploy'.") + if err != nil { + return err + } + if !ans { + return fmt.Errorf("import aborted") + } + } + + // If user confirmed changes, move the state file from temp dir to state location + f, err := os.Create(filepath.Join(dir, TerraformStateFileName)) + if err != nil { + return err + } + defer f.Close() + + tmpF, err := os.Open(tmpState) + if err != nil { + return err + } + defer tmpF.Close() + + _, err = io.Copy(f, tmpF) + if err != nil { + return err + } + + return nil +} + +// Name implements bundle.Mutator. 
+func (*importResource) Name() string { + return "terraform.Import" +} + +func Import(opts *BindOptions) bundle.Mutator { + return &importResource{opts: opts} +} diff --git a/bundle/deploy/terraform/unbind.go b/bundle/deploy/terraform/unbind.go new file mode 100644 index 000000000..74e15e184 --- /dev/null +++ b/bundle/deploy/terraform/unbind.go @@ -0,0 +1,41 @@ +package terraform + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/hashicorp/terraform-exec/tfexec" +) + +type unbind struct { + resourceType string + resourceKey string +} + +func (m *unbind) Apply(ctx context.Context, b *bundle.Bundle) error { + tf := b.Terraform + if tf == nil { + return fmt.Errorf("terraform not initialized") + } + + err := tf.Init(ctx, tfexec.Upgrade(true)) + if err != nil { + return fmt.Errorf("terraform init: %w", err) + } + + err = tf.StateRm(ctx, fmt.Sprintf("%s.%s", m.resourceType, m.resourceKey)) + if err != nil { + return fmt.Errorf("terraform state rm: %w", err) + } + + return nil +} + +func (*unbind) Name() string { + return "terraform.Unbind" +} + +func Unbind(resourceType string, resourceKey string) bundle.Mutator { + return &unbind{resourceType: resourceType, resourceKey: resourceKey} +} diff --git a/bundle/phases/bind.go b/bundle/phases/bind.go new file mode 100644 index 000000000..b2e92d6e2 --- /dev/null +++ b/bundle/phases/bind.go @@ -0,0 +1,45 @@ +package phases + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/lock" + "github.com/databricks/cli/bundle/deploy/terraform" +) + +func Bind(opts *terraform.BindOptions) bundle.Mutator { + return newPhase( + "bind", + []bundle.Mutator{ + lock.Acquire(), + bundle.Defer( + bundle.Seq( + terraform.StatePull(), + terraform.Interpolate(), + terraform.Write(), + terraform.Import(opts), + terraform.StatePush(), + ), + lock.Release(lock.GoalBind), + ), + }, + ) +} + +func Unbind(resourceType string, resourceKey string) bundle.Mutator { + return newPhase( + "unbind", + []bundle.Mutator{ + lock.Acquire(), + bundle.Defer( + bundle.Seq( + terraform.StatePull(), + terraform.Interpolate(), + terraform.Write(), + terraform.Unbind(resourceType, resourceKey), + terraform.StatePush(), + ), + lock.Release(lock.GoalUnbind), + ), + }, + ) +} diff --git a/bundle/phases/destroy.go b/bundle/phases/destroy.go index 216d29210..f974a0565 100644 --- a/bundle/phases/destroy.go +++ b/bundle/phases/destroy.go @@ -14,9 +14,9 @@ func Destroy() bundle.Mutator { lock.Acquire(), bundle.Defer( bundle.Seq( + terraform.StatePull(), terraform.Interpolate(), terraform.Write(), - terraform.StatePull(), terraform.Plan(terraform.PlanGoal("destroy")), terraform.Destroy(), terraform.StatePush(), diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go index a82311d83..43a9ef680 100644 --- a/cmd/bundle/bundle.go +++ b/cmd/bundle/bundle.go @@ -1,6 +1,7 @@ package bundle import ( + "github.com/databricks/cli/cmd/bundle/deployment" "github.com/spf13/cobra" ) @@ -24,5 +25,6 @@ func New() *cobra.Command { cmd.AddCommand(newInitCommand()) cmd.AddCommand(newSummaryCommand()) cmd.AddCommand(newGenerateCommand()) + cmd.AddCommand(deployment.NewDeploymentCommand()) return cmd } diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index a83c268bc..c76789c17 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -3,6 +3,7 @@ package bundle import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/spf13/cobra" ) @@ -10,7 +11,7 @@ func 
newDeployCommand() *cobra.Command { cmd := &cobra.Command{ Use: "deploy", Short: "Deploy bundle", - PreRunE: ConfigureBundleWithVariables, + PreRunE: utils.ConfigureBundleWithVariables, } var force bool diff --git a/cmd/bundle/deployment/bind.go b/cmd/bundle/deployment/bind.go new file mode 100644 index 000000000..541292807 --- /dev/null +++ b/cmd/bundle/deployment/bind.go @@ -0,0 +1,65 @@ +package deployment + +import ( + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/libs/cmdio" + "github.com/spf13/cobra" +) + +func newBindCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "bind KEY RESOURCE_ID", + Short: "Bind bundle-defined resources to existing resources", + Args: cobra.ExactArgs(2), + PreRunE: utils.ConfigureBundleWithVariables, + } + + var autoApprove bool + var forceLock bool + cmd.Flags().BoolVar(&autoApprove, "auto-approve", false, "Automatically approve the binding") + cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + b := bundle.Get(cmd.Context()) + r := b.Config.Resources + resource, err := r.FindResourceByConfigKey(args[0]) + if err != nil { + return err + } + + w := b.WorkspaceClient() + ctx := cmd.Context() + exists, err := resource.Exists(ctx, w, args[1]) + if err != nil { + return fmt.Errorf("failed to fetch the resource, err: %w", err) + } + + if !exists { + return fmt.Errorf("%s with an id '%s' is not found", resource.TerraformResourceName(), args[1]) + } + + b.Config.Bundle.Deployment.Lock.Force = forceLock + err = bundle.Apply(cmd.Context(), b, bundle.Seq( + phases.Initialize(), + phases.Bind(&terraform.BindOptions{ + AutoApprove: autoApprove, + ResourceType: resource.TerraformResourceName(), + ResourceKey: args[0], + ResourceId: args[1], + }), + )) + if err != nil { + return fmt.Errorf("failed to bind the resource, err: %w", err) + } + + cmdio.LogString(ctx, fmt.Sprintf("Successfully bound %s with an id '%s'. 
Run 'bundle deploy' to deploy changes to your workspace", resource.TerraformResourceName(), args[1])) + return nil + } + + return cmd +} diff --git a/cmd/bundle/deployment/deployment.go b/cmd/bundle/deployment/deployment.go new file mode 100644 index 000000000..d29a8e72b --- /dev/null +++ b/cmd/bundle/deployment/deployment.go @@ -0,0 +1,17 @@ +package deployment + +import ( + "github.com/spf13/cobra" +) + +func NewDeploymentCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "deployment", + Short: "Deployment related commands", + Long: "Deployment related commands", + } + + cmd.AddCommand(newBindCommand()) + cmd.AddCommand(newUnbindCommand()) + return cmd +} diff --git a/cmd/bundle/deployment/unbind.go b/cmd/bundle/deployment/unbind.go new file mode 100644 index 000000000..e7de8a3d4 --- /dev/null +++ b/cmd/bundle/deployment/unbind.go @@ -0,0 +1,37 @@ +package deployment + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/spf13/cobra" +) + +func newUnbindCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "unbind KEY", + Short: "Unbind bundle-defined resources from its managed remote resource", + Args: cobra.ExactArgs(1), + PreRunE: utils.ConfigureBundleWithVariables, + } + + var forceLock bool + cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + b := bundle.Get(cmd.Context()) + r := b.Config.Resources + resource, err := r.FindResourceByConfigKey(args[0]) + if err != nil { + return err + } + + b.Config.Bundle.Deployment.Lock.Force = forceLock + return bundle.Apply(cmd.Context(), b, bundle.Seq( + phases.Initialize(), + phases.Unbind(resource.TerraformResourceName(), args[0]), + )) + } + + return cmd +} diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index dad199bf9..a0bfb1a4a 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" @@ -17,7 +18,7 @@ func newDestroyCommand() *cobra.Command { Use: "destroy", Short: "Destroy deployed bundle resources", - PreRunE: ConfigureBundleWithVariables, + PreRunE: utils.ConfigureBundleWithVariables, } var autoApprove bool diff --git a/cmd/bundle/generate.go b/cmd/bundle/generate.go index 89d7c6adc..6c48b1586 100644 --- a/cmd/bundle/generate.go +++ b/cmd/bundle/generate.go @@ -2,6 +2,7 @@ package bundle import ( "github.com/databricks/cli/cmd/bundle/generate" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/spf13/cobra" ) @@ -12,7 +13,7 @@ func newGenerateCommand() *cobra.Command { Use: "generate", Short: "Generate bundle configuration", Long: "Generate bundle configuration", - PreRunE: ConfigureBundleWithVariables, + PreRunE: utils.ConfigureBundleWithVariables, } cmd.AddCommand(generate.NewGenerateJobCommand()) diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index c1a8d4ea9..54aa6ae75 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/bundle/run" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ 
-20,7 +21,7 @@ func newRunCommand() *cobra.Command { Short: "Run a resource (e.g. a job or a pipeline)", Args: cobra.MaximumNArgs(1), - PreRunE: ConfigureBundleWithVariables, + PreRunE: utils.ConfigureBundleWithVariables, } var runOptions run.Options diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index efa3c679d..596f7d3d8 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" @@ -20,7 +21,7 @@ func newSummaryCommand() *cobra.Command { Use: "summary", Short: "Describe the bundle resources and their deployment states", - PreRunE: ConfigureBundleWithVariables, + PreRunE: utils.ConfigureBundleWithVariables, // This command is currently intended for the Databricks VSCode extension only Hidden: true, diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index ca81275b7..d9f8582c2 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/sync" "github.com/spf13/cobra" @@ -48,7 +49,7 @@ func newSyncCommand() *cobra.Command { Short: "Synchronize bundle tree to the workspace", Args: cobra.NoArgs, - PreRunE: ConfigureBundleWithVariables, + PreRunE: utils.ConfigureBundleWithVariables, } var f syncFlags diff --git a/cmd/bundle/utils/utils.go b/cmd/bundle/utils/utils.go new file mode 100644 index 000000000..f68ab06b0 --- /dev/null +++ b/cmd/bundle/utils/utils.go @@ -0,0 +1,24 @@ +package utils + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/cmd/root" + "github.com/spf13/cobra" +) + +func ConfigureBundleWithVariables(cmd *cobra.Command, args []string) error { + // Load bundle config and apply target + err := root.MustConfigureBundle(cmd, args) + if err != nil { + return err + } + + variables, err := cmd.Flags().GetStringSlice("var") + if err != nil { + return err + } + + // Initialize variables by assigning them values passed as command line flags + b := bundle.Get(cmd.Context()) + return b.Config.InitializeVariables(variables) +} diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index b98cbd52d..01b8c18ac 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/spf13/cobra" ) @@ -13,7 +14,7 @@ func newValidateCommand() *cobra.Command { Use: "validate", Short: "Validate configuration", - PreRunE: ConfigureBundleWithVariables, + PreRunE: utils.ConfigureBundleWithVariables, } cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bundle/variables.go b/cmd/bundle/variables.go index c3e4af645..f8f5167ea 100644 --- a/cmd/bundle/variables.go +++ b/cmd/bundle/variables.go @@ -1,28 +1,9 @@ package bundle import ( - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) -func ConfigureBundleWithVariables(cmd *cobra.Command, args []string) error { - // Load bundle config and apply target - err := root.MustConfigureBundle(cmd, args) - if err != nil { - return err - } - - variables, err := 
cmd.Flags().GetStringSlice("var") - if err != nil { - return err - } - - // Initialize variables by assigning them values passed as command line flags - b := bundle.Get(cmd.Context()) - return b.Config.InitializeVariables(variables) -} - func initVariableFlag(cmd *cobra.Command) { cmd.PersistentFlags().StringSlice("var", []string{}, `set values for variables defined in bundle config. Example: --var="foo=bar"`) } diff --git a/internal/bundle/bind_resource_test.go b/internal/bundle/bind_resource_test.go new file mode 100644 index 000000000..d44ad2c31 --- /dev/null +++ b/internal/bundle/bind_resource_test.go @@ -0,0 +1,185 @@ +package bundle + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccBindJobToExistingJob(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + ctx, wt := acc.WorkspaceTest(t) + gt := &generateJobTest{T: t, w: wt.W} + + nodeTypeId := internal.GetNodeTypeId(env) + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "spark_version": "13.3.x-scala2.12", + "node_type_id": nodeTypeId, + }) + require.NoError(t, err) + + jobId := gt.createTestJob(ctx) + t.Cleanup(func() { + gt.destroyJob(ctx, jobId) + require.NoError(t, err) + }) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") + _, _, err = c.Run() + require.NoError(t, err) + + // Remove .databricks directory to simulate a fresh deployment + err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + // Check that job is bound and updated with config from bundle + job, err := w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + require.NoError(t, err) + require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") + + c = internal.NewCobraTestRunner(t, "bundle", "deployment", "unbind", "foo") + _, _, err = c.Run() + require.NoError(t, err) + + // Remove .databricks directory to simulate a fresh deployment + err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) + require.NoError(t, err) + + err = destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // Check that job is unbound and exists after bundle is destroyed + job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + require.NoError(t, err) + require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") +} + +func TestAccAbortBind(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + ctx, wt := acc.WorkspaceTest(t) + gt := &generateJobTest{T: t, w: wt.W} + + nodeTypeId := internal.GetNodeTypeId(env) + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "spark_version": "13.3.x-scala2.12", + "node_type_id": nodeTypeId, + }) + require.NoError(t, err) + + jobId := 
gt.createTestJob(ctx) + t.Cleanup(func() { + gt.destroyJob(ctx, jobId) + destroyBundle(t, ctx, bundleRoot) + }) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) + + // Simulate user aborting the bind. This is done by not providing any input to the prompt in non-interactive mode. + _, _, err = c.Run() + require.ErrorContains(t, err, "failed to bind the resource") + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + // Check that job is not bound and not updated with config from bundle + job, err := w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + require.NoError(t, err) + + require.NotEqual(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Contains(t, job.Settings.Tasks[0].NotebookTask.NotebookPath, "test") +} + +func TestAccGenerateAndBind(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + ctx, wt := acc.WorkspaceTest(t) + gt := &generateJobTest{T: t, w: wt.W} + + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + "unique_id": uniqueId, + }) + require.NoError(t, err) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + jobId := gt.createTestJob(ctx) + t.Cleanup(func() { + _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + if err == nil { + gt.destroyJob(ctx, jobId) + } + }) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", + "--key", "test_job_key", + "--existing-job-id", fmt.Sprint(jobId), + "--config-dir", filepath.Join(bundleRoot, "resources"), + "--source-dir", filepath.Join(bundleRoot, "src")) + _, _, err = c.Run() + require.NoError(t, err) + + _, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py")) + require.NoError(t, err) + + matches, err := filepath.Glob(filepath.Join(bundleRoot, "resources", "test_job_key.yml")) + require.NoError(t, err) + + require.Len(t, matches, 1) + + c = internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") + _, _, err = c.Run() + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + err = destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // Check that job is bound and does not extsts after bundle is destroyed + _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + require.ErrorContains(t, err, "does not exist.") +} diff --git a/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl b/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl index 5d17e0fda..85d31ce3e 100644 --- a/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl @@ -4,5 +4,5 @@ bundle: workspace: root_path: "~/.bundle/{{.unique_id}}" -includes: - - resources/*yml +include: + - resources/*.yml diff --git a/internal/bundle/generate_pipeline_test.go b/internal/bundle/generate_pipeline_test.go index 0005e29fa..b8a1ac849 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/internal/bundle/generate_pipeline_test.go @@ -28,7 +28,7 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { }) require.NoError(t, err) - pipelineId := gt.createTestPipeline(ctx) + pipelineId, name := gt.createTestPipeline(ctx) 
t.Cleanup(func() { gt.destroyPipeline(ctx, pipelineId) }) @@ -52,9 +52,16 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { require.Len(t, matches, 1) // check the content of generated yaml - data, err := os.ReadFile(matches[0]) + fileName := matches[0] + data, err := os.ReadFile(fileName) require.NoError(t, err) generatedYaml := string(data) + + // Replace pipeline name + generatedYaml = strings.ReplaceAll(generatedYaml, name, internal.RandomName("copy-generated-pipeline-")) + err = os.WriteFile(fileName, []byte(generatedYaml), 0644) + require.NoError(t, err) + require.Contains(t, generatedYaml, "libraries:") require.Contains(t, generatedYaml, "- notebook:") require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "notebook.py"))) @@ -73,7 +80,7 @@ type generatePipelineTest struct { w *databricks.WorkspaceClient } -func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) string { +func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, string) { t := gt.T w := gt.w @@ -87,8 +94,9 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) string { err = f.Write(ctx, "test.py", strings.NewReader("print('Hello!')")) require.NoError(t, err) + name := internal.RandomName("generated-pipeline-") resp, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{ - Name: internal.RandomName("generated-pipeline-"), + Name: name, Libraries: []pipelines.PipelineLibrary{ { Notebook: &pipelines.NotebookLibrary{ @@ -104,7 +112,7 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) string { }) require.NoError(t, err) - return resp.PipelineId + return resp.PipelineId, name } func (gt *generatePipelineTest) destroyPipeline(ctx context.Context, pipelineId string) { diff --git a/internal/helpers.go b/internal/helpers.go index 22e38e211..6377ae07e 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -131,6 +131,14 @@ func (t *cobraTestRunner) WaitForTextPrinted(text string, timeout time.Duration) }, timeout, 50*time.Millisecond) } +func (t *cobraTestRunner) WaitForOutput(text string, timeout time.Duration) { + require.Eventually(t.T, func() bool { + currentStdout := t.stdout.String() + currentErrout := t.stderr.String() + return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text) + }, timeout, 50*time.Millisecond) +} + func (t *cobraTestRunner) WithStdin() { reader, writer := io.Pipe() t.stdinR = reader From 299e9b56a608de2eef1207f2626f8b931bd9cbf3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 15:52:17 +0100 Subject: [PATCH 030/286] Bump github.com/databricks/databricks-sdk-go from 0.30.1 to 0.32.0 (#1199) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.30.1 to 0.32.0. 
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- bundle/schema/docs/bundle_descriptions.json | 244 ++++++++- cmd/account/settings/settings.go | 68 +-- cmd/workspace/clean-rooms/clean-rooms.go | 18 +- cmd/workspace/connections/connections.go | 23 +- .../credentials-manager.go | 4 +- cmd/workspace/dashboards/dashboards.go | 5 +- .../lakehouse-monitors/lakehouse-monitors.go | 301 ++++++++++ cmd/workspace/lakeview/lakeview.go | 3 - cmd/workspace/metastores/metastores.go | 1 - cmd/workspace/pipelines/pipelines.go | 90 --- .../registered-models/registered-models.go | 1 - cmd/workspace/schemas/schemas.go | 1 - cmd/workspace/settings/settings.go | 512 ++++++++++++------ .../vector-search-indexes.go | 2 +- cmd/workspace/volumes/volumes.go | 35 +- go.mod | 20 +- go.sum | 44 +- 18 files changed, 961 insertions(+), 413 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index f705ffea6..bf3a5ea97 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -e05401ed5dd4974c5333d737ec308a7d451f749f \ No newline at end of file +c40670f5a2055c92cf0a6aac92a5bccebfb80866 \ No newline at end of file diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index fb28247ac..228f6e13f 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -193,7 +193,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." } } }, @@ -500,6 +500,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -508,6 +514,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. 
`abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -524,6 +538,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -703,7 +725,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -757,11 +779,14 @@ "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." }, "project_directory": { - "description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used." + "description": "Path to the project directory. Optional for Git sourced tasks, in which case if no value is provided, the root of the Git repository is used." }, "schema": { "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, + "source": { + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." } @@ -816,6 +841,7 @@ "existing_cluster_id": { "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." 
}, + "for_each_task": null, "health": { "description": "", "properties": { @@ -1082,6 +1108,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -1090,6 +1122,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -1106,6 +1146,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -1212,7 +1260,7 @@ "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", "properties": { "base_parameters": { - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", "additionalproperties": { "description": "" } @@ -1303,7 +1351,7 @@ "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." }, "parameters": { - "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Parameters passed to the main method.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", "items": { "description": "" } @@ -1314,7 +1362,7 @@ "description": "If spark_python_task, indicates that this task must run a Python file.", "properties": { "parameters": { - "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Command line parameters passed to the Python file.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", "items": { "description": "" } @@ -1331,7 +1379,7 @@ "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). 
You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n", "properties": { "parameters": { - "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Command-line parameters passed to spark submit.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", "items": { "description": "" } @@ -1398,7 +1446,10 @@ "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", "properties": { "path": { - "description": "Relative path of the SQL file in the remote Git repository." + "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." + }, + "source": { + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -1483,7 +1534,7 @@ "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout." }, "trigger": { - "description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "description": "A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "file_arrival": { "description": "File arrival trigger settings.", @@ -1492,7 +1543,7 @@ "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n" }, "url": { - "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location." + "description": "The storage location to monitor for file arrivals. The value must point to the root or a subpath of an external location URL or the root or subpath of a Unity Catalog volume." }, "wait_after_last_change_seconds": { "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n" @@ -1500,7 +1551,27 @@ } }, "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." 
+ }, + "table": { + "description": "Table trigger settings.", + "properties": { + "condition": { + "description": "The table(s) condition based on which to trigger a job run." + }, + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.\n" + }, + "table_names": { + "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "items": { + "description": "" + } + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds.\n" + } + } } } }, @@ -1969,10 +2040,13 @@ "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "properties": { "max_workers": { - "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + "description": "The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`." }, "min_workers": { - "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + "description": "The minimum number of workers the cluster can scale down to when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + }, + "mode": { + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n" } } }, @@ -2101,6 +2175,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -2109,6 +2189,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. 
e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -2125,6 +2213,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -2629,7 +2725,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." } } }, @@ -2936,6 +3032,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -2944,6 +3046,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -2960,6 +3070,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. 
e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -3139,7 +3257,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -3193,11 +3311,14 @@ "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." }, "project_directory": { - "description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used." + "description": "Path to the project directory. Optional for Git sourced tasks, in which case if no value is provided, the root of the Git repository is used." }, "schema": { "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, + "source": { + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." } @@ -3252,6 +3373,7 @@ "existing_cluster_id": { "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." }, + "for_each_task": null, "health": { "description": "", "properties": { @@ -3518,6 +3640,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. 
Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -3526,6 +3654,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -3542,6 +3678,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -3648,7 +3792,7 @@ "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", "properties": { "base_parameters": { - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", "additionalproperties": { "description": "" } @@ -3739,7 +3883,7 @@ "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." }, "parameters": { - "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Parameters passed to the main method.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", "items": { "description": "" } @@ -3750,7 +3894,7 @@ "description": "If spark_python_task, indicates that this task must run a Python file.", "properties": { "parameters": { - "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Command line parameters passed to the Python file.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", "items": { "description": "" } @@ -3767,7 +3911,7 @@ "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n", "properties": { "parameters": { - "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Command-line parameters passed to spark submit.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", "items": { "description": "" } @@ -3834,7 +3978,10 @@ "description": "If file, indicates that this job runs a SQL file in a remote Git repository. 
Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", "properties": { "path": { - "description": "Relative path of the SQL file in the remote Git repository." + "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." + }, + "source": { + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -3919,7 +4066,7 @@ "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout." }, "trigger": { - "description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "description": "A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "file_arrival": { "description": "File arrival trigger settings.", @@ -3928,7 +4075,7 @@ "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n" }, "url": { - "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location." + "description": "The storage location to monitor for file arrivals. The value must point to the root or a subpath of an external location URL or the root or subpath of a Unity Catalog volume." }, "wait_after_last_change_seconds": { "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n" @@ -3936,7 +4083,27 @@ } }, "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." + }, + "table": { + "description": "Table trigger settings.", + "properties": { + "condition": { + "description": "The table(s) condition based on which to trigger a job run." + }, + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.\n" + }, + "table_names": { + "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "items": { + "description": "" + } + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. 
The\nminimum allowed value is 60 seconds.\n" + } + } } } }, @@ -4405,10 +4572,13 @@ "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "properties": { "max_workers": { - "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + "description": "The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`." }, "min_workers": { - "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + "description": "The minimum number of workers the cluster can scale down to when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + }, + "mode": { + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n" } } }, @@ -4537,6 +4707,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -4545,6 +4721,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -4561,6 +4745,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. 
`gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { diff --git a/cmd/account/settings/settings.go b/cmd/account/settings/settings.go index e22b9950a..adeda73d9 100755 --- a/cmd/account/settings/settings.go +++ b/cmd/account/settings/settings.go @@ -3,6 +3,8 @@ package settings import ( + "fmt" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -60,25 +62,18 @@ func newDeletePersonalComputeSetting() *cobra.Command { // TODO: short flags - cmd.Use = "delete-personal-compute-setting ETAG" + cmd.Flags().StringVar(&deletePersonalComputeSettingReq.Etag, "etag", deletePersonalComputeSettingReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete-personal-compute-setting" cmd.Short = `Delete Personal Compute setting.` cmd.Long = `Delete Personal Compute setting. - Reverts back the Personal Compute setting value to default (ON) - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` + Reverts back the Personal Compute setting value to default (ON)` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := cobra.ExactArgs(0) return check(cmd, args) } @@ -87,8 +82,6 @@ func newDeletePersonalComputeSetting() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - deletePersonalComputeSettingReq.Etag = args[0] - response, err := a.Settings.DeletePersonalComputeSetting(ctx, deletePersonalComputeSettingReq) if err != nil { return err @@ -114,41 +107,34 @@ func init() { }) } -// start read-personal-compute-setting command +// start get-personal-compute-setting command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var readPersonalComputeSettingOverrides []func( +var getPersonalComputeSettingOverrides []func( *cobra.Command, - *settings.ReadPersonalComputeSettingRequest, + *settings.GetPersonalComputeSettingRequest, ) -func newReadPersonalComputeSetting() *cobra.Command { +func newGetPersonalComputeSetting() *cobra.Command { cmd := &cobra.Command{} - var readPersonalComputeSettingReq settings.ReadPersonalComputeSettingRequest + var getPersonalComputeSettingReq settings.GetPersonalComputeSettingRequest // TODO: short flags - cmd.Use = "read-personal-compute-setting ETAG" + cmd.Flags().StringVar(&getPersonalComputeSettingReq.Etag, "etag", getPersonalComputeSettingReq.Etag, `etag used for versioning.`) + + cmd.Use = "get-personal-compute-setting" cmd.Short = `Get Personal Compute setting.` cmd.Long = `Get Personal Compute setting. - Gets the value of the Personal Compute setting. 
- - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` + Gets the value of the Personal Compute setting.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := cobra.ExactArgs(0) return check(cmd, args) } @@ -157,9 +143,7 @@ func newReadPersonalComputeSetting() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - readPersonalComputeSettingReq.Etag = args[0] - - response, err := a.Settings.ReadPersonalComputeSetting(ctx, readPersonalComputeSettingReq) + response, err := a.Settings.GetPersonalComputeSetting(ctx, getPersonalComputeSettingReq) if err != nil { return err } @@ -171,8 +155,8 @@ func newReadPersonalComputeSetting() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range readPersonalComputeSettingOverrides { - fn(cmd, &readPersonalComputeSettingReq) + for _, fn := range getPersonalComputeSettingOverrides { + fn(cmd, &getPersonalComputeSettingReq) } return cmd @@ -180,7 +164,7 @@ func newReadPersonalComputeSetting() *cobra.Command { func init() { cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReadPersonalComputeSetting()) + cmd.AddCommand(newGetPersonalComputeSetting()) }) } @@ -202,9 +186,6 @@ func newUpdatePersonalComputeSetting() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updatePersonalComputeSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&updatePersonalComputeSettingReq.AllowMissing, "allow-missing", updatePersonalComputeSettingReq.AllowMissing, `This should always be set to true for Settings RPCs.`) - // TODO: complex arg: setting - cmd.Use = "update-personal-compute-setting" cmd.Short = `Update Personal Compute setting.` cmd.Long = `Update Personal Compute setting. @@ -213,11 +194,6 @@ func newUpdatePersonalComputeSetting() *cobra.Command { cmd.Annotations = make(map[string]string) - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -228,6 +204,8 @@ func newUpdatePersonalComputeSetting() *cobra.Command { if err != nil { return err } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := a.Settings.UpdatePersonalComputeSetting(ctx, updatePersonalComputeSettingReq) diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 99d732f90..cac5de34c 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -127,7 +127,7 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete NAME_ARG" + cmd.Use = "delete NAME" cmd.Short = `Delete a clean room.` cmd.Long = `Delete a clean room. @@ -135,7 +135,7 @@ func newDelete() *cobra.Command { owner of the clean room. 
Arguments: - NAME_ARG: The name of the clean room.` + NAME: The name of the clean room.` cmd.Annotations = make(map[string]string) @@ -149,7 +149,7 @@ func newDelete() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - deleteReq.NameArg = args[0] + deleteReq.Name = args[0] err = w.CleanRooms.Delete(ctx, deleteReq) if err != nil { @@ -194,7 +194,7 @@ func newGet() *cobra.Command { cmd.Flags().BoolVar(&getReq.IncludeRemoteDetails, "include-remote-details", getReq.IncludeRemoteDetails, `Whether to include remote details (central) on the clean room.`) - cmd.Use = "get NAME_ARG" + cmd.Use = "get NAME" cmd.Short = `Get a clean room.` cmd.Long = `Get a clean room. @@ -202,7 +202,7 @@ func newGet() *cobra.Command { metastore admin or the owner of the clean room. Arguments: - NAME_ARG: The name of the clean room.` + NAME: The name of the clean room.` cmd.Annotations = make(map[string]string) @@ -216,7 +216,7 @@ func newGet() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getReq.NameArg = args[0] + getReq.Name = args[0] response, err := w.CleanRooms.Get(ctx, getReq) if err != nil { @@ -329,7 +329,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of clean room.`) - cmd.Use = "update NAME_ARG" + cmd.Use = "update NAME" cmd.Short = `Update a clean room.` cmd.Long = `Update a clean room. @@ -349,7 +349,7 @@ func newUpdate() *cobra.Command { Table removals through **update** do not require additional privileges. Arguments: - NAME_ARG: The name of the clean room.` + NAME: The name of the clean room.` cmd.Annotations = make(map[string]string) @@ -369,7 +369,7 @@ func newUpdate() *cobra.Command { return err } } - updateReq.NameArg = args[0] + updateReq.Name = args[0] response, err := w.CleanRooms.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index 5ad0c199b..e28004c0d 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -134,14 +134,14 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete NAME_ARG" + cmd.Use = "delete NAME" cmd.Short = `Delete a connection.` cmd.Long = `Delete a connection. Deletes the connection that matches the supplied name. Arguments: - NAME_ARG: The name of the connection to be deleted.` + NAME: The name of the connection to be deleted.` cmd.Annotations = make(map[string]string) @@ -152,7 +152,7 @@ func newDelete() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NAME_ARG argument specified. Loading names for Connections drop-down." + promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down." names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx) close(promptSpinner) if err != nil { @@ -167,7 +167,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the name of the connection to be deleted") } - deleteReq.NameArg = args[0] + deleteReq.Name = args[0] err = w.Connections.Delete(ctx, deleteReq) if err != nil { @@ -210,14 +210,14 @@ func newGet() *cobra.Command { // TODO: short flags - cmd.Use = "get NAME_ARG" + cmd.Use = "get NAME" cmd.Short = `Get a connection.` cmd.Long = `Get a connection. Gets a connection from it's name. 
Arguments: - NAME_ARG: Name of the connection.` + NAME: Name of the connection.` cmd.Annotations = make(map[string]string) @@ -228,7 +228,7 @@ func newGet() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NAME_ARG argument specified. Loading names for Connections drop-down." + promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down." names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx) close(promptSpinner) if err != nil { @@ -243,7 +243,7 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have name of the connection") } - getReq.NameArg = args[0] + getReq.Name = args[0] response, err := w.Connections.Get(ctx, getReq) if err != nil { @@ -336,18 +336,17 @@ func newUpdate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the connection.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the connection.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the connection.`) - cmd.Use = "update NAME_ARG" + cmd.Use = "update NAME" cmd.Short = `Update a connection.` cmd.Long = `Update a connection. Updates the connection that matches the supplied name. Arguments: - NAME_ARG: Name of the connection.` + NAME: Name of the connection.` cmd.Annotations = make(map[string]string) @@ -369,7 +368,7 @@ func newUpdate() *cobra.Command { } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } - updateReq.NameArg = args[0] + updateReq.Name = args[0] response, err := w.Connections.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/credentials-manager/credentials-manager.go b/cmd/workspace/credentials-manager/credentials-manager.go index 30b33f7b3..132ba51ee 100755 --- a/cmd/workspace/credentials-manager/credentials-manager.go +++ b/cmd/workspace/credentials-manager/credentials-manager.go @@ -61,8 +61,8 @@ func newExchangeToken() *cobra.Command { cmd.Short = `Exchange token.` cmd.Long = `Exchange token. - Exchange tokens with an Identity Provider to get a new access token. It - allowes specifying scopes to determine token permissions.` + Exchange tokens with an Identity Provider to get a new access token. It allows + specifying scopes to determine token permissions.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 0cd758189..34bbb28b4 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -276,7 +276,10 @@ func newList() *cobra.Command { cmd.Short = `Get dashboard objects.` cmd.Long = `Get dashboard objects. - Fetch a paginated list of dashboard objects.` + Fetch a paginated list of dashboard objects. 
+ + ### **Warning: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban.**` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go index 3a644b933..518e97c45 100755 --- a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go +++ b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go @@ -42,6 +42,84 @@ func New() *cobra.Command { return cmd } +// start cancel-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cancelRefreshOverrides []func( + *cobra.Command, + *catalog.CancelRefreshRequest, +) + +func newCancelRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var cancelRefreshReq catalog.CancelRefreshRequest + + // TODO: short flags + + cmd.Use = "cancel-refresh FULL_NAME REFRESH_ID" + cmd.Short = `Cancel refresh.` + cmd.Long = `Cancel refresh. + + Cancel an active monitor refresh for the given refresh ID. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Additionally, the call must be made from the workspace where the monitor was + created. + + Arguments: + FULL_NAME: Full name of the table. + REFRESH_ID: ID of the refresh.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + cancelRefreshReq.FullName = args[0] + cancelRefreshReq.RefreshId = args[1] + + err = w.LakehouseMonitors.CancelRefresh(ctx, cancelRefreshReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range cancelRefreshOverrides { + fn(cmd, &cancelRefreshReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newCancelRefresh()) + }) +} + // start create command // Slice with functions to override default command behavior. @@ -302,6 +380,229 @@ func init() { }) } +// start get-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getRefreshOverrides []func( + *cobra.Command, + *catalog.GetRefreshRequest, +) + +func newGetRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var getRefreshReq catalog.GetRefreshRequest + + // TODO: short flags + + cmd.Use = "get-refresh FULL_NAME REFRESH_ID" + cmd.Short = `Get refresh.` + cmd.Long = `Get refresh. + + Gets info about a specific monitor refresh using the given refresh ID. + + The caller must either: 1. be an owner of the table's parent catalog 2. 
have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + Additionally, the call must be made from the workspace where the monitor was + created. + + Arguments: + FULL_NAME: Full name of the table. + REFRESH_ID: ID of the refresh.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getRefreshReq.FullName = args[0] + getRefreshReq.RefreshId = args[1] + + response, err := w.LakehouseMonitors.GetRefresh(ctx, getRefreshReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getRefreshOverrides { + fn(cmd, &getRefreshReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetRefresh()) + }) +} + +// start list-refreshes command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listRefreshesOverrides []func( + *cobra.Command, + *catalog.ListRefreshesRequest, +) + +func newListRefreshes() *cobra.Command { + cmd := &cobra.Command{} + + var listRefreshesReq catalog.ListRefreshesRequest + + // TODO: short flags + + cmd.Use = "list-refreshes FULL_NAME" + cmd.Short = `List refreshes.` + cmd.Long = `List refreshes. + + Gets an array containing the history of the most recent refreshes (up to 25) + for this table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + Additionally, the call must be made from the workspace where the monitor was + created. + + Arguments: + FULL_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listRefreshesReq.FullName = args[0] + + response, err := w.LakehouseMonitors.ListRefreshes(ctx, listRefreshesReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range listRefreshesOverrides { + fn(cmd, &listRefreshesReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newListRefreshes()) + }) +} + +// start run-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var runRefreshOverrides []func( + *cobra.Command, + *catalog.RunRefreshRequest, +) + +func newRunRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var runRefreshReq catalog.RunRefreshRequest + + // TODO: short flags + + cmd.Use = "run-refresh FULL_NAME" + cmd.Short = `Queue a metric refresh for a monitor.` + cmd.Long = `Queue a metric refresh for a monitor. + + Queues a metric refresh on the monitor for the specified table. The refresh + will execute in the background. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Additionally, the call must be made from the workspace where the monitor was + created. + + Arguments: + FULL_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + runRefreshReq.FullName = args[0] + + response, err := w.LakehouseMonitors.RunRefresh(ctx, runRefreshReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range runRefreshOverrides { + fn(cmd, &runRefreshReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newRunRefresh()) + }) +} + // start update command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 67f1811d3..a6dddd0de 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -24,9 +24,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "dashboards", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Apply optional overrides to this command. 
diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index a0e03ad0d..fdd0d1c08 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -619,7 +619,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.DeltaSharingOrganizationName, "delta-sharing-organization-name", updateReq.DeltaSharingOrganizationName, `The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.`) cmd.Flags().Int64Var(&updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, "delta-sharing-recipient-token-lifetime-in-seconds", updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, `The lifetime of delta sharing recipient token in seconds.`) cmd.Flags().Var(&updateReq.DeltaSharingScope, "delta-sharing-scope", `The scope of Delta Sharing enabled for the metastore. Supported values: [INTERNAL, INTERNAL_AND_EXTERNAL]`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The user-specified name of the metastore.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the metastore.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the metastore.`) cmd.Flags().StringVar(&updateReq.PrivilegeModelVersion, "privilege-model-version", updateReq.PrivilegeModelVersion, `Privilege model version of the metastore, of the form major.minor (e.g., 1.0).`) diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 488977100..ad54b6b10 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -705,96 +705,6 @@ func init() { }) } -// start reset command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var resetOverrides []func( - *cobra.Command, - *pipelines.ResetRequest, -) - -func newReset() *cobra.Command { - cmd := &cobra.Command{} - - var resetReq pipelines.ResetRequest - - var resetSkipWait bool - var resetTimeout time.Duration - - cmd.Flags().BoolVar(&resetSkipWait, "no-wait", resetSkipWait, `do not wait to reach RUNNING state`) - cmd.Flags().DurationVar(&resetTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach RUNNING state`) - // TODO: short flags - - cmd.Use = "reset PIPELINE_ID" - cmd.Short = `Reset a pipeline.` - cmd.Long = `Reset a pipeline. - - Resets a pipeline.` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." - names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have ") - } - resetReq.PipelineId = args[0] - - wait, err := w.Pipelines.Reset(ctx, resetReq) - if err != nil { - return err - } - if resetSkipWait { - return nil - } - spinner := cmdio.Spinner(ctx) - info, err := wait.OnProgress(func(i *pipelines.GetPipelineResponse) { - statusMessage := i.Cause - spinner <- statusMessage - }).GetWithTimeout(resetTimeout) - close(spinner) - if err != nil { - return err - } - return cmdio.Render(ctx, info) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range resetOverrides { - fn(cmd, &resetReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReset()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index 774859f17..b506e180a 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -587,7 +587,6 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the registered model.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the registered model.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the registered model.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the registered model.`) diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index bad61a5f1..fc496467e 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -378,7 +378,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. 
Supported values: [DISABLE, ENABLE, INHERIT]`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of schema, relative to parent catalog.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the schema.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`) // TODO: map via StringToStringVar: properties diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 193434d4e..35b65eb64 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -3,6 +3,8 @@ package settings import ( + "fmt" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -44,23 +46,25 @@ func New() *cobra.Command { return cmd } -// start delete-default-workspace-namespace command +// start delete-default-namespace-setting command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var deleteDefaultWorkspaceNamespaceOverrides []func( +var deleteDefaultNamespaceSettingOverrides []func( *cobra.Command, - *settings.DeleteDefaultWorkspaceNamespaceRequest, + *settings.DeleteDefaultNamespaceSettingRequest, ) -func newDeleteDefaultWorkspaceNamespace() *cobra.Command { +func newDeleteDefaultNamespaceSetting() *cobra.Command { cmd := &cobra.Command{} - var deleteDefaultWorkspaceNamespaceReq settings.DeleteDefaultWorkspaceNamespaceRequest + var deleteDefaultNamespaceSettingReq settings.DeleteDefaultNamespaceSettingRequest // TODO: short flags - cmd.Use = "delete-default-workspace-namespace ETAG" + cmd.Flags().StringVar(&deleteDefaultNamespaceSettingReq.Etag, "etag", deleteDefaultNamespaceSettingReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete-default-namespace-setting" cmd.Short = `Delete the default namespace setting.` cmd.Long = `Delete the default namespace setting. @@ -68,159 +72,7 @@ func newDeleteDefaultWorkspaceNamespace() *cobra.Command { be provided in DELETE requests (as a query parameter). The etag can be retrieved by making a GET request before the DELETE request. If the setting is updated/deleted concurrently, DELETE will fail with 409 and the request will - need to be retried by using the fresh etag in the 409 response. - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - deleteDefaultWorkspaceNamespaceReq.Etag = args[0] - - response, err := w.Settings.DeleteDefaultWorkspaceNamespace(ctx, deleteDefaultWorkspaceNamespaceReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. 
- // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteDefaultWorkspaceNamespaceOverrides { - fn(cmd, &deleteDefaultWorkspaceNamespaceReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteDefaultWorkspaceNamespace()) - }) -} - -// start read-default-workspace-namespace command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var readDefaultWorkspaceNamespaceOverrides []func( - *cobra.Command, - *settings.ReadDefaultWorkspaceNamespaceRequest, -) - -func newReadDefaultWorkspaceNamespace() *cobra.Command { - cmd := &cobra.Command{} - - var readDefaultWorkspaceNamespaceReq settings.ReadDefaultWorkspaceNamespaceRequest - - // TODO: short flags - - cmd.Use = "read-default-workspace-namespace ETAG" - cmd.Short = `Get the default namespace setting.` - cmd.Long = `Get the default namespace setting. - - Gets the default namespace setting. - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - readDefaultWorkspaceNamespaceReq.Etag = args[0] - - response, err := w.Settings.ReadDefaultWorkspaceNamespace(ctx, readDefaultWorkspaceNamespaceReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range readDefaultWorkspaceNamespaceOverrides { - fn(cmd, &readDefaultWorkspaceNamespaceReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReadDefaultWorkspaceNamespace()) - }) -} - -// start update-default-workspace-namespace command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var updateDefaultWorkspaceNamespaceOverrides []func( - *cobra.Command, - *settings.UpdateDefaultWorkspaceNamespaceRequest, -) - -func newUpdateDefaultWorkspaceNamespace() *cobra.Command { - cmd := &cobra.Command{} - - var updateDefaultWorkspaceNamespaceReq settings.UpdateDefaultWorkspaceNamespaceRequest - var updateDefaultWorkspaceNamespaceJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateDefaultWorkspaceNamespaceJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().BoolVar(&updateDefaultWorkspaceNamespaceReq.AllowMissing, "allow-missing", updateDefaultWorkspaceNamespaceReq.AllowMissing, `This should always be set to true for Settings API.`) - cmd.Flags().StringVar(&updateDefaultWorkspaceNamespaceReq.FieldMask, "field-mask", updateDefaultWorkspaceNamespaceReq.FieldMask, `Field mask is required to be passed into the PATCH request.`) - // TODO: complex arg: setting - - cmd.Use = "update-default-workspace-namespace" - cmd.Short = `Update the default namespace setting.` - cmd.Long = `Update the default namespace setting. - - Updates the default namespace setting for the workspace. A fresh etag needs to - be provided in PATCH requests (as part of the setting field). The etag can be - retrieved by making a GET request before the PATCH request. Note that if the - setting does not exist, GET will return a NOT_FOUND error and the etag will be - present in the error response, which should be set in the PATCH request. If - the setting is updated concurrently, PATCH will fail with 409 and the request - will need to be retried by using the fresh etag in the 409 response.` + need to be retried by using the fresh etag in the 409 response.` cmd.Annotations = make(map[string]string) @@ -234,14 +86,7 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = updateDefaultWorkspaceNamespaceJson.Unmarshal(&updateDefaultWorkspaceNamespaceReq) - if err != nil { - return err - } - } - - response, err := w.Settings.UpdateDefaultWorkspaceNamespace(ctx, updateDefaultWorkspaceNamespaceReq) + response, err := w.Settings.DeleteDefaultNamespaceSetting(ctx, deleteDefaultNamespaceSettingReq) if err != nil { return err } @@ -253,8 +98,8 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range updateDefaultWorkspaceNamespaceOverrides { - fn(cmd, &updateDefaultWorkspaceNamespaceReq) + for _, fn := range deleteDefaultNamespaceSettingOverrides { + fn(cmd, &deleteDefaultNamespaceSettingReq) } return cmd @@ -262,7 +107,334 @@ func newUpdateDefaultWorkspaceNamespace() *cobra.Command { func init() { cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateDefaultWorkspaceNamespace()) + cmd.AddCommand(newDeleteDefaultNamespaceSetting()) + }) +} + +// start delete-restrict-workspace-admins-setting command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteRestrictWorkspaceAdminsSettingOverrides []func( + *cobra.Command, + *settings.DeleteRestrictWorkspaceAdminsSettingRequest, +) + +func newDeleteRestrictWorkspaceAdminsSetting() *cobra.Command { + cmd := &cobra.Command{} + + var deleteRestrictWorkspaceAdminsSettingReq settings.DeleteRestrictWorkspaceAdminsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteRestrictWorkspaceAdminsSettingReq.Etag, "etag", deleteRestrictWorkspaceAdminsSettingReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete-restrict-workspace-admins-setting" + cmd.Short = `Delete the restrict workspace admins setting.` + cmd.Long = `Delete the restrict workspace admins setting. + + Reverts the restrict workspace admins setting status for the workspace. A + fresh etag needs to be provided in DELETE requests (as a query parameter). The + etag can be retrieved by making a GET request before the DELETE request. If + the setting is updated/deleted concurrently, DELETE will fail with 409 and the + request will need to be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DeleteRestrictWorkspaceAdminsSetting(ctx, deleteRestrictWorkspaceAdminsSettingReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteRestrictWorkspaceAdminsSettingOverrides { + fn(cmd, &deleteRestrictWorkspaceAdminsSettingReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDeleteRestrictWorkspaceAdminsSetting()) + }) +} + +// start get-default-namespace-setting command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getDefaultNamespaceSettingOverrides []func( + *cobra.Command, + *settings.GetDefaultNamespaceSettingRequest, +) + +func newGetDefaultNamespaceSetting() *cobra.Command { + cmd := &cobra.Command{} + + var getDefaultNamespaceSettingReq settings.GetDefaultNamespaceSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getDefaultNamespaceSettingReq.Etag, "etag", getDefaultNamespaceSettingReq.Etag, `etag used for versioning.`) + + cmd.Use = "get-default-namespace-setting" + cmd.Short = `Get the default namespace setting.` + cmd.Long = `Get the default namespace setting. + + Gets the default namespace setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.GetDefaultNamespaceSetting(ctx, getDefaultNamespaceSettingReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getDefaultNamespaceSettingOverrides { + fn(cmd, &getDefaultNamespaceSettingReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetDefaultNamespaceSetting()) + }) +} + +// start get-restrict-workspace-admins-setting command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getRestrictWorkspaceAdminsSettingOverrides []func( + *cobra.Command, + *settings.GetRestrictWorkspaceAdminsSettingRequest, +) + +func newGetRestrictWorkspaceAdminsSetting() *cobra.Command { + cmd := &cobra.Command{} + + var getRestrictWorkspaceAdminsSettingReq settings.GetRestrictWorkspaceAdminsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getRestrictWorkspaceAdminsSettingReq.Etag, "etag", getRestrictWorkspaceAdminsSettingReq.Etag, `etag used for versioning.`) + + cmd.Use = "get-restrict-workspace-admins-setting" + cmd.Short = `Get the restrict workspace admins setting.` + cmd.Long = `Get the restrict workspace admins setting. + + Gets the restrict workspace admins setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.GetRestrictWorkspaceAdminsSetting(ctx, getRestrictWorkspaceAdminsSettingReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getRestrictWorkspaceAdminsSettingOverrides { + fn(cmd, &getRestrictWorkspaceAdminsSettingReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGetRestrictWorkspaceAdminsSetting()) + }) +} + +// start update-default-namespace-setting command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateDefaultNamespaceSettingOverrides []func( + *cobra.Command, + *settings.UpdateDefaultNamespaceSettingRequest, +) + +func newUpdateDefaultNamespaceSetting() *cobra.Command { + cmd := &cobra.Command{} + + var updateDefaultNamespaceSettingReq settings.UpdateDefaultNamespaceSettingRequest + var updateDefaultNamespaceSettingJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateDefaultNamespaceSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update-default-namespace-setting" + cmd.Short = `Update the default namespace setting.` + cmd.Long = `Update the default namespace setting. + + Updates the default namespace setting for the workspace. A fresh etag needs to + be provided in PATCH requests (as part of the setting field). The etag can be + retrieved by making a GET request before the PATCH request. 
Note that if the + setting does not exist, GET will return a NOT_FOUND error and the etag will be + present in the error response, which should be set in the PATCH request. If + the setting is updated concurrently, PATCH will fail with 409 and the request + will need to be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateDefaultNamespaceSettingJson.Unmarshal(&updateDefaultNamespaceSettingReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.UpdateDefaultNamespaceSetting(ctx, updateDefaultNamespaceSettingReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateDefaultNamespaceSettingOverrides { + fn(cmd, &updateDefaultNamespaceSettingReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdateDefaultNamespaceSetting()) + }) +} + +// start update-restrict-workspace-admins-setting command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateRestrictWorkspaceAdminsSettingOverrides []func( + *cobra.Command, + *settings.UpdateRestrictWorkspaceAdminsSettingRequest, +) + +func newUpdateRestrictWorkspaceAdminsSetting() *cobra.Command { + cmd := &cobra.Command{} + + var updateRestrictWorkspaceAdminsSettingReq settings.UpdateRestrictWorkspaceAdminsSettingRequest + var updateRestrictWorkspaceAdminsSettingJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateRestrictWorkspaceAdminsSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update-restrict-workspace-admins-setting" + cmd.Short = `Update the restrict workspace admins setting.` + cmd.Long = `Update the restrict workspace admins setting. + + Updates the restrict workspace admins setting for the workspace. A fresh etag + needs to be provided in PATCH requests (as part of the setting field). The + etag can be retrieved by making a GET request before the PATCH request. 
If the + setting is updated concurrently, PATCH will fail with 409 and the request will + need to be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateRestrictWorkspaceAdminsSettingJson.Unmarshal(&updateRestrictWorkspaceAdminsSettingReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.UpdateRestrictWorkspaceAdminsSetting(ctx, updateRestrictWorkspaceAdminsSettingReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateRestrictWorkspaceAdminsSettingOverrides { + fn(cmd, &updateRestrictWorkspaceAdminsSettingReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newUpdateRestrictWorkspaceAdminsSetting()) }) } diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 8999967f1..0d3277f2a 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -61,7 +61,7 @@ func newCreateIndex() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: complex arg: delta_sync_vector_index_spec + // TODO: complex arg: delta_sync_index_spec // TODO: complex arg: direct_access_index_spec cmd.Flags().StringVar(&createIndexReq.EndpointName, "endpoint-name", createIndexReq.EndpointName, `Name of the endpoint to be used for serving the index.`) diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 77b601819..1944237c0 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -174,7 +174,7 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete FULL_NAME_ARG" + cmd.Use = "delete NAME" cmd.Short = `Delete a Volume.` cmd.Long = `Delete a Volume. @@ -185,7 +185,7 @@ func newDelete() *cobra.Command { on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. Arguments: - FULL_NAME_ARG: The three-level (fully qualified) name of the volume` + NAME: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) @@ -196,7 +196,7 @@ func newDelete() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down." + promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down." 
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{}) close(promptSpinner) if err != nil { @@ -211,7 +211,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume") } - deleteReq.FullNameArg = args[0] + deleteReq.Name = args[0] err = w.Volumes.Delete(ctx, deleteReq) if err != nil { @@ -254,12 +254,15 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of volumes to return (page length).`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token returned by a previous request.`) + cmd.Use = "list CATALOG_NAME SCHEMA_NAME" cmd.Short = `List Volumes.` cmd.Long = `List Volumes. - Gets an array of all volumes for the current metastore under the parent - catalog and schema. + Gets an array of volumes for the current metastore under the parent catalog + and schema. The returned volumes are filtered based on the privileges of the calling user. For example, the metastore admin is able to list all the volumes. A regular @@ -274,9 +277,6 @@ func newList() *cobra.Command { CATALOG_NAME: The identifier of the catalog SCHEMA_NAME: The identifier of the schema` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -333,7 +333,7 @@ func newRead() *cobra.Command { // TODO: short flags - cmd.Use = "read FULL_NAME_ARG" + cmd.Use = "read NAME" cmd.Short = `Get a Volume.` cmd.Long = `Get a Volume. @@ -345,7 +345,7 @@ func newRead() *cobra.Command { the **USE_SCHEMA** privilege on the parent schema. Arguments: - FULL_NAME_ARG: The three-level (fully qualified) name of the volume` + NAME: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) @@ -356,7 +356,7 @@ func newRead() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down." + promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down." names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{}) close(promptSpinner) if err != nil { @@ -371,7 +371,7 @@ func newRead() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume") } - readReq.FullNameArg = args[0] + readReq.Name = args[0] response, err := w.Volumes.Read(ctx, readReq) if err != nil { @@ -417,11 +417,10 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the volume.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the volume.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the volume.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the volume.`) - cmd.Use = "update FULL_NAME_ARG" + cmd.Use = "update NAME" cmd.Short = `Update a Volume.` cmd.Long = `Update a Volume. @@ -435,7 +434,7 @@ func newUpdate() *cobra.Command { updated. 
Arguments: - FULL_NAME_ARG: The three-level (fully qualified) name of the volume` + NAME: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) @@ -452,7 +451,7 @@ func newUpdate() *cobra.Command { } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down." + promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down." names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{}) close(promptSpinner) if err != nil { @@ -467,7 +466,7 @@ func newUpdate() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume") } - updateReq.FullNameArg = args[0] + updateReq.Name = args[0] response, err := w.Volumes.Update(ctx, updateReq) if err != nil { diff --git a/go.mod b/go.mod index 6a634ca28..4aaecd1d0 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.30.1 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.32.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause @@ -41,7 +41,7 @@ require ( github.com/cloudflare/circl v1.3.7 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -55,18 +55,18 @@ require ( github.com/stretchr/objx v0.5.0 // indirect github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect + go.opentelemetry.io/otel v1.22.0 // indirect + go.opentelemetry.io/otel/metric v1.22.0 // indirect + go.opentelemetry.io/otel/trace v1.22.0 // indirect golang.org/x/crypto v0.19.0 // indirect golang.org/x/net v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.154.0 // indirect + google.golang.org/api v0.161.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index a754df59f..545ff9e35 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod 
h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.30.1 h1:ux6I3aHqUH/AOLZEaEHBmwkbHuSAmb+42mTfvh2A7bE= -github.com/databricks/databricks-sdk-go v0.30.1/go.mod h1:QB64wT8EmR9T4ZPqeTRKjfIF4tPZuP9M9kM8Hcr019Q= +github.com/databricks/databricks-sdk-go v0.32.0 h1:H6SQmfOOXd6x2fOp+zISkcR1nzJ7NTXXmIv8lWyK66Y= +github.com/databricks/databricks-sdk-go v0.32.0/go.mod h1:yyXGdhEfXBBsIoTm0mdl8QN0xzCQPUVZTozMM/7wVuI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -53,8 +53,8 @@ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgF github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -161,16 +161,16 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod 
h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= +go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= +go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= +go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= +go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -244,8 +244,8 @@ golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.154.0 h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050= -google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc= +google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU= +google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -253,15 +253,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1 
h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -273,8 +273,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= From e474948a4b0eeddd708c4e3e0b6cc23c664417b2 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 15 Feb 2024 16:03:19 +0100 Subject: [PATCH 031/286] Generate correct YAML if custom_tags or spark_conf is used for pipeline or job cluster configuration (#1210) These fields (key and values) needs to be double quoted in order for yaml loader to read, parse and unmarshal it into Go struct correctly because these fields are `map[string]string` type. 
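
For illustration, a sketch of what the generator now emits for these fields, mirroring the expectations in the regression test added below (resource names and values are taken from that test; indentation is approximate):

```yaml
resources:
  pipelines:
    test_pipeline:
      name: test-pipeline
      clusters:
        - custom_tags:
            "Tag1": "24X7-1234"
        - spark_conf:
            "spark.databricks.delta.preview.enabled": "true"
```

Without the quotes, values such as `true` can be re-read as booleans and then fail to unmarshal into a `map[string]string`.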
## Tests Added regression unit and E2E tests --- cmd/bundle/generate/generate_test.go | 109 +++++++++++++++++++++ cmd/bundle/generate/job.go | 9 +- cmd/bundle/generate/pipeline.go | 11 ++- internal/bundle/generate_job_test.go | 5 + internal/bundle/generate_pipeline_test.go | 19 ++++ libs/dyn/yamlsaver/saver.go | 65 +++++++++---- libs/dyn/yamlsaver/saver_test.go | 113 ++++++++++++++++++---- 7 files changed, 292 insertions(+), 39 deletions(-) diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index b29bdef28..b71f1edfd 100644 --- a/cmd/bundle/generate/generate_test.go +++ b/cmd/bundle/generate/generate_test.go @@ -12,6 +12,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/mock" @@ -36,6 +38,18 @@ func TestGeneratePipelineCommand(t *testing.T) { Name: "test-pipeline", Spec: &pipelines.PipelineSpec{ Name: "test-pipeline", + Clusters: []pipelines.PipelineCluster{ + { + CustomTags: map[string]string{ + "Tag1": "24X7-1234", + }, + }, + { + SparkConf: map[string]string{ + "spark.databricks.delta.preview.enabled": "true", + }, + }, + }, Libraries: []pipelines.PipelineLibrary{ {Notebook: &pipelines.NotebookLibrary{ Path: "/test/notebook", @@ -85,6 +99,11 @@ func TestGeneratePipelineCommand(t *testing.T) { pipelines: test_pipeline: name: test-pipeline + clusters: + - custom_tags: + "Tag1": "24X7-1234" + - spark_conf: + "spark.databricks.delta.preview.enabled": "true" libraries: - notebook: path: %s @@ -100,3 +119,93 @@ func TestGeneratePipelineCommand(t *testing.T) { require.NoError(t, err) require.Equal(t, "Py content", string(data)) } + +func TestGenerateJobCommand(t *testing.T) { + cmd := NewGenerateJobCommand() + + root := t.TempDir() + b := &bundle.Bundle{ + Config: config.Root{ + Path: root, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + jobsApi := m.GetMockJobsAPI() + jobsApi.EXPECT().Get(mock.Anything, jobs.GetJobRequest{JobId: 1234}).Return(&jobs.Job{ + Settings: &jobs.JobSettings{ + Name: "test-job", + JobClusters: []jobs.JobCluster{ + {NewCluster: &compute.ClusterSpec{ + CustomTags: map[string]string{ + "Tag1": "24X7-1234", + }, + }}, + {NewCluster: &compute.ClusterSpec{ + SparkConf: map[string]string{ + "spark.databricks.delta.preview.enabled": "true", + }, + }}, + }, + Tasks: []jobs.Task{ + { + TaskKey: "notebook_task", + NotebookTask: &jobs.NotebookTask{ + NotebookPath: "/test/notebook", + }, + }, + }, + }, + }, nil) + + workspaceApi := m.GetMockWorkspaceAPI() + workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/test/notebook").Return(&workspace.ObjectInfo{ + ObjectType: workspace.ObjectTypeNotebook, + Language: workspace.LanguagePython, + Path: "/test/notebook", + }, nil) + + notebookContent := io.NopCloser(bytes.NewBufferString("# Databricks notebook source\nNotebook content")) + workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) + + cmd.SetContext(bundle.Context(context.Background(), b)) + cmd.Flag("existing-job-id").Value.Set("1234") + + configDir := filepath.Join(root, "resources") + cmd.Flag("config-dir").Value.Set(configDir) + + srcDir := filepath.Join(root, "src") + 
cmd.Flag("source-dir").Value.Set(srcDir) + + var key string + cmd.Flags().StringVar(&key, "key", "test_job", "") + + err := cmd.RunE(cmd, []string{}) + require.NoError(t, err) + + data, err := os.ReadFile(filepath.Join(configDir, "test_job.yml")) + require.NoError(t, err) + + require.Equal(t, fmt.Sprintf(`resources: + jobs: + test_job: + name: test-job + job_clusters: + - new_cluster: + custom_tags: + "Tag1": "24X7-1234" + - new_cluster: + spark_conf: + "spark.databricks.delta.preview.enabled": "true" + tasks: + - task_key: notebook_task + notebook_task: + notebook_path: %s +`, filepath.Join("..", "src", "notebook.py")), string(data)) + + data, err = os.ReadFile(filepath.Join(srcDir, "notebook.py")) + require.NoError(t, err) + require.Equal(t, "# Databricks notebook source\nNotebook content", string(data)) +} diff --git a/cmd/bundle/generate/job.go b/cmd/bundle/generate/job.go index b88b2c17b..c5a94a8f6 100644 --- a/cmd/bundle/generate/job.go +++ b/cmd/bundle/generate/job.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/libs/textutil" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/spf13/cobra" + "gopkg.in/yaml.v3" ) func NewGenerateJobCommand() *cobra.Command { @@ -82,7 +83,13 @@ func NewGenerateJobCommand() *cobra.Command { } filename := filepath.Join(configDir, fmt.Sprintf("%s.yml", jobKey)) - err = yamlsaver.SaveAsYAML(result, filename, force) + saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{ + // Including all JobSettings and nested fields which are map[string]string type + "spark_conf": yaml.DoubleQuotedStyle, + "custom_tags": yaml.DoubleQuotedStyle, + "tags": yaml.DoubleQuotedStyle, + }) + err = saver.SaveAsYAML(result, filename, force) if err != nil { return err } diff --git a/cmd/bundle/generate/pipeline.go b/cmd/bundle/generate/pipeline.go index 955db34b2..4c5fcf425 100644 --- a/cmd/bundle/generate/pipeline.go +++ b/cmd/bundle/generate/pipeline.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/libs/textutil" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/spf13/cobra" + "gopkg.in/yaml.v3" ) func NewGeneratePipelineCommand() *cobra.Command { @@ -82,7 +83,15 @@ func NewGeneratePipelineCommand() *cobra.Command { } filename := filepath.Join(configDir, fmt.Sprintf("%s.yml", pipelineKey)) - err = yamlsaver.SaveAsYAML(result, filename, force) + saver := yamlsaver.NewSaverWithStyle( + // Including all PipelineSpec and nested fields which are map[string]string type + map[string]yaml.Style{ + "spark_conf": yaml.DoubleQuotedStyle, + "custom_tags": yaml.DoubleQuotedStyle, + "configuration": yaml.DoubleQuotedStyle, + }, + ) + err = saver.SaveAsYAML(result, filename, force) if err != nil { return err } diff --git a/internal/bundle/generate_job_test.go b/internal/bundle/generate_job_test.go index e6f157809..847a7a14e 100644 --- a/internal/bundle/generate_job_test.go +++ b/internal/bundle/generate_job_test.go @@ -103,6 +103,11 @@ func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { SparkVersion: "13.3.x-scala2.12", NumWorkers: 1, NodeTypeId: nodeTypeId, + SparkConf: map[string]string{ + "spark.databricks.enableWsfs": "true", + "spark.databricks.hive.metastore.glueCatalog.enabled": "true", + "spark.databricks.pip.ignoreSSL": "true", + }, }, NotebookTask: &jobs.NotebookTask{ NotebookPath: path.Join(tmpdir, "test"), diff --git a/internal/bundle/generate_pipeline_test.go b/internal/bundle/generate_pipeline_test.go index b8a1ac849..82467952d 100644 --- a/internal/bundle/generate_pipeline_test.go +++ 
b/internal/bundle/generate_pipeline_test.go @@ -94,6 +94,9 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, err = f.Write(ctx, "test.py", strings.NewReader("print('Hello!')")) require.NoError(t, err) + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + nodeTypeId := internal.GetNodeTypeId(env) + name := internal.RandomName("generated-pipeline-") resp, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{ Name: name, @@ -109,6 +112,22 @@ func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, }, }, }, + Clusters: []pipelines.PipelineCluster{ + { + CustomTags: map[string]string{ + "Tag1": "Yes", + "Tag2": "24X7", + "Tag3": "APP-1234", + }, + NodeTypeId: nodeTypeId, + NumWorkers: 2, + SparkConf: map[string]string{ + "spark.databricks.enableWsfs": "true", + "spark.databricks.hive.metastore.glueCatalog.enabled": "true", + "spark.databricks.pip.ignoreSSL": "true", + }, + }, + }, }) require.NoError(t, err) diff --git a/libs/dyn/yamlsaver/saver.go b/libs/dyn/yamlsaver/saver.go index f5863ecfb..84483a12f 100644 --- a/libs/dyn/yamlsaver/saver.go +++ b/libs/dyn/yamlsaver/saver.go @@ -13,7 +13,21 @@ import ( "gopkg.in/yaml.v3" ) -func SaveAsYAML(data any, filename string, force bool) error { +type saver struct { + nodesWithStyle map[string]yaml.Style +} + +func NewSaver() *saver { + return &saver{} +} + +func NewSaverWithStyle(nodesWithStyle map[string]yaml.Style) *saver { + return &saver{ + nodesWithStyle: nodesWithStyle, + } +} + +func (s *saver) SaveAsYAML(data any, filename string, force bool) error { err := os.MkdirAll(filepath.Dir(filename), 0755) if err != nil { return err @@ -36,15 +50,15 @@ func SaveAsYAML(data any, filename string, force bool) error { } defer file.Close() - err = encode(data, file) + err = s.encode(data, file) if err != nil { return err } return nil } -func encode(data any, w io.Writer) error { - yamlNode, err := ToYamlNode(dyn.V(data)) +func (s *saver) encode(data any, w io.Writer) error { + yamlNode, err := s.toYamlNode(dyn.V(data)) if err != nil { return err } @@ -53,7 +67,11 @@ func encode(data any, w io.Writer) error { return enc.Encode(yamlNode) } -func ToYamlNode(v dyn.Value) (*yaml.Node, error) { +func (s *saver) toYamlNode(v dyn.Value) (*yaml.Node, error) { + return s.toYamlNodeWithStyle(v, yaml.Style(0)) +} + +func (s *saver) toYamlNodeWithStyle(v dyn.Value, style yaml.Style) (*yaml.Node, error) { switch v.Kind() { case dyn.KindMap: m, _ := v.AsMap() @@ -68,8 +86,14 @@ func ToYamlNode(v dyn.Value) (*yaml.Node, error) { content := make([]*yaml.Node, 0) for _, k := range keys { item := m[k] - node := yaml.Node{Kind: yaml.ScalarNode, Value: k} - c, err := ToYamlNode(item) + node := yaml.Node{Kind: yaml.ScalarNode, Value: k, Style: style} + var nestedNodeStyle yaml.Style + if customStyle, ok := s.hasStyle(k); ok { + nestedNodeStyle = customStyle + } else { + nestedNodeStyle = style + } + c, err := s.toYamlNodeWithStyle(item, nestedNodeStyle) if err != nil { return nil, err } @@ -77,40 +101,45 @@ func ToYamlNode(v dyn.Value) (*yaml.Node, error) { content = append(content, c) } - return &yaml.Node{Kind: yaml.MappingNode, Content: content}, nil + return &yaml.Node{Kind: yaml.MappingNode, Content: content, Style: style}, nil case dyn.KindSequence: - s, _ := v.AsSequence() + seq, _ := v.AsSequence() content := make([]*yaml.Node, 0) - for _, item := range s { - node, err := ToYamlNode(item) + for _, item := range seq { + node, err := s.toYamlNodeWithStyle(item, style) if err != nil { return nil, err } content = 
append(content, node) } - return &yaml.Node{Kind: yaml.SequenceNode, Content: content}, nil + return &yaml.Node{Kind: yaml.SequenceNode, Content: content, Style: style}, nil case dyn.KindNil: - return &yaml.Node{Kind: yaml.ScalarNode, Value: "null"}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: "null", Style: style}, nil case dyn.KindString: // If the string is a scalar value (bool, int, float and etc.), we want to quote it. if isScalarValueInString(v) { return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustString(), Style: yaml.DoubleQuotedStyle}, nil } - return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustString()}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustString(), Style: style}, nil case dyn.KindBool: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustBool())}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustBool()), Style: style}, nil case dyn.KindInt: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustInt())}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustInt()), Style: style}, nil case dyn.KindFloat: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustFloat())}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustFloat()), Style: style}, nil case dyn.KindTime: - return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustTime().UTC().String()}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustTime().UTC().String(), Style: style}, nil default: // Panic because we only want to deal with known types. panic(fmt.Sprintf("invalid kind: %d", v.Kind())) } } +func (s *saver) hasStyle(key string) (yaml.Style, bool) { + style, ok := s.nodesWithStyle[key] + return style, ok +} + func isScalarValueInString(v dyn.Value) bool { if v.Kind() != dyn.KindString { return false diff --git a/libs/dyn/yamlsaver/saver_test.go b/libs/dyn/yamlsaver/saver_test.go index 70878d55b..ec44a4298 100644 --- a/libs/dyn/yamlsaver/saver_test.go +++ b/libs/dyn/yamlsaver/saver_test.go @@ -10,45 +10,51 @@ import ( ) func TestMarshalNilValue(t *testing.T) { + s := NewSaver() var nilValue = dyn.NilValue - v, err := ToYamlNode(nilValue) + v, err := s.toYamlNode(nilValue) assert.NoError(t, err) assert.Equal(t, "null", v.Value) } func TestMarshalIntValue(t *testing.T) { + s := NewSaver() var intValue = dyn.NewValue(1, dyn.Location{}) - v, err := ToYamlNode(intValue) + v, err := s.toYamlNode(intValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalFloatValue(t *testing.T) { + s := NewSaver() var floatValue = dyn.NewValue(1.0, dyn.Location{}) - v, err := ToYamlNode(floatValue) + v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalBoolValue(t *testing.T) { + s := NewSaver() var boolValue = dyn.NewValue(true, dyn.Location{}) - v, err := ToYamlNode(boolValue) + v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalTimeValue(t *testing.T) { + s := NewSaver() var timeValue = dyn.NewValue(time.Unix(0, 0), dyn.Location{}) - v, err := ToYamlNode(timeValue) + v, err := s.toYamlNode(timeValue) assert.NoError(t, err) assert.Equal(t, "1970-01-01 00:00:00 +0000 UTC", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalSequenceValue(t *testing.T) { + s := NewSaver() var sequenceValue = dyn.NewValue( []dyn.Value{ 
dyn.NewValue("value1", dyn.Location{File: "file", Line: 1, Column: 2}), @@ -56,7 +62,7 @@ func TestMarshalSequenceValue(t *testing.T) { }, dyn.Location{File: "file", Line: 1, Column: 2}, ) - v, err := ToYamlNode(sequenceValue) + v, err := s.toYamlNode(sequenceValue) assert.NoError(t, err) assert.Equal(t, yaml.SequenceNode, v.Kind) assert.Equal(t, "value1", v.Content[0].Value) @@ -64,14 +70,16 @@ func TestMarshalSequenceValue(t *testing.T) { } func TestMarshalStringValue(t *testing.T) { + s := NewSaver() var stringValue = dyn.NewValue("value", dyn.Location{}) - v, err := ToYamlNode(stringValue) + v, err := s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "value", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalMapValue(t *testing.T) { + s := NewSaver() var mapValue = dyn.NewValue( map[string]dyn.Value{ "key3": dyn.NewValue("value3", dyn.Location{File: "file", Line: 3, Column: 2}), @@ -80,7 +88,7 @@ func TestMarshalMapValue(t *testing.T) { }, dyn.Location{File: "file", Line: 1, Column: 2}, ) - v, err := ToYamlNode(mapValue) + v, err := s.toYamlNode(mapValue) assert.NoError(t, err) assert.Equal(t, yaml.MappingNode, v.Kind) assert.Equal(t, "key1", v.Content[0].Value) @@ -94,6 +102,7 @@ func TestMarshalMapValue(t *testing.T) { } func TestMarshalNestedValues(t *testing.T) { + s := NewSaver() var mapValue = dyn.NewValue( map[string]dyn.Value{ "key1": dyn.NewValue( @@ -105,7 +114,7 @@ func TestMarshalNestedValues(t *testing.T) { }, dyn.Location{File: "file", Line: 1, Column: 2}, ) - v, err := ToYamlNode(mapValue) + v, err := s.toYamlNode(mapValue) assert.NoError(t, err) assert.Equal(t, yaml.MappingNode, v.Kind) assert.Equal(t, "key1", v.Content[0].Value) @@ -115,15 +124,16 @@ func TestMarshalNestedValues(t *testing.T) { } func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { + s := NewSaver() var hexValue = dyn.NewValue(0x123, dyn.Location{}) - v, err := ToYamlNode(hexValue) + v, err := s.toYamlNode(hexValue) assert.NoError(t, err) assert.Equal(t, "291", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) var stringValue = dyn.NewValue("0x123", dyn.Location{}) - v, err = ToYamlNode(stringValue) + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0x123", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) @@ -131,15 +141,16 @@ func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { } func TestMarshalBinaryValueIsQuoted(t *testing.T) { + s := NewSaver() var binaryValue = dyn.NewValue(0b101, dyn.Location{}) - v, err := ToYamlNode(binaryValue) + v, err := s.toYamlNode(binaryValue) assert.NoError(t, err) assert.Equal(t, "5", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) var stringValue = dyn.NewValue("0b101", dyn.Location{}) - v, err = ToYamlNode(stringValue) + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0b101", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) @@ -147,15 +158,16 @@ func TestMarshalBinaryValueIsQuoted(t *testing.T) { } func TestMarshalOctalValueIsQuoted(t *testing.T) { + s := NewSaver() var octalValue = dyn.NewValue(0123, dyn.Location{}) - v, err := ToYamlNode(octalValue) + v, err := s.toYamlNode(octalValue) assert.NoError(t, err) assert.Equal(t, "83", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) var stringValue = dyn.NewValue("0123", dyn.Location{}) - v, err = ToYamlNode(stringValue) + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) 
assert.Equal(t, "0123", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) @@ -163,15 +175,16 @@ func TestMarshalOctalValueIsQuoted(t *testing.T) { } func TestMarshalFloatValueIsQuoted(t *testing.T) { + s := NewSaver() var floatValue = dyn.NewValue(1.0, dyn.Location{}) - v, err := ToYamlNode(floatValue) + v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) var stringValue = dyn.NewValue("1.0", dyn.Location{}) - v, err = ToYamlNode(stringValue) + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "1.0", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) @@ -179,17 +192,79 @@ func TestMarshalFloatValueIsQuoted(t *testing.T) { } func TestMarshalBoolValueIsQuoted(t *testing.T) { + s := NewSaver() var boolValue = dyn.NewValue(true, dyn.Location{}) - v, err := ToYamlNode(boolValue) + v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) var stringValue = dyn.NewValue("true", dyn.Location{}) - v, err = ToYamlNode(stringValue) + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) } + +func TestCustomStylingWithNestedMap(t *testing.T) { + s := NewSaverWithStyle(map[string]yaml.Style{ + "styled": yaml.DoubleQuotedStyle, + }) + + var styledMap = dyn.NewValue( + map[string]dyn.Value{ + "key1": dyn.NewValue("value1", dyn.Location{File: "file", Line: 1, Column: 2}), + "key2": dyn.NewValue("value2", dyn.Location{File: "file", Line: 2, Column: 2}), + }, + dyn.Location{File: "file", Line: -2, Column: 2}, + ) + + var unstyledMap = dyn.NewValue( + map[string]dyn.Value{ + "key3": dyn.NewValue("value3", dyn.Location{File: "file", Line: 1, Column: 2}), + "key4": dyn.NewValue("value4", dyn.Location{File: "file", Line: 2, Column: 2}), + }, + dyn.Location{File: "file", Line: -1, Column: 2}, + ) + + var val = dyn.NewValue( + map[string]dyn.Value{ + "styled": styledMap, + "unstyled": unstyledMap, + }, + dyn.Location{File: "file", Line: 1, Column: 2}, + ) + + mv, err := s.toYamlNode(val) + assert.NoError(t, err) + + // Check that the styled map is quoted + v := mv.Content[1] + + assert.Equal(t, yaml.MappingNode, v.Kind) + assert.Equal(t, "key1", v.Content[0].Value) + assert.Equal(t, "value1", v.Content[1].Value) + assert.Equal(t, yaml.DoubleQuotedStyle, v.Content[0].Style) + assert.Equal(t, yaml.DoubleQuotedStyle, v.Content[1].Style) + + assert.Equal(t, "key2", v.Content[2].Value) + assert.Equal(t, "value2", v.Content[3].Value) + assert.Equal(t, yaml.DoubleQuotedStyle, v.Content[2].Style) + assert.Equal(t, yaml.DoubleQuotedStyle, v.Content[3].Style) + + // Check that the unstyled map is not quoted + v = mv.Content[3] + + assert.Equal(t, yaml.MappingNode, v.Kind) + assert.Equal(t, "key3", v.Content[0].Value) + assert.Equal(t, "value3", v.Content[1].Value) + assert.Equal(t, yaml.Style(0), v.Content[0].Style) + assert.Equal(t, yaml.Style(0), v.Content[1].Style) + + assert.Equal(t, "key4", v.Content[2].Value) + assert.Equal(t, "value4", v.Content[3].Value) + assert.Equal(t, yaml.Style(0), v.Content[2].Style) + assert.Equal(t, yaml.Style(0), v.Content[3].Style) +} From 18166f5b4718b9000f69a8a3d65254e565a1cf72 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 15 Feb 2024 16:16:40 +0100 Subject: [PATCH 032/286] Add option 
to include fields present in the type but not in the value (#1211) ## Changes This feature supports variable lookups in a `dyn.Value` that are present in the type but haven't been initialized with a value. For example: `${bundle.git.origin_url}` is present in the `dyn.Value` only if it was assigned a value. If it wasn't assigned a value it should resolve to the empty string. This normalization option, when set, ensures that all fields that are represented in the specified type are present in the return value. This change is in support of #1098. ## Tests Added unit test. --- libs/dyn/convert/normalize.go | 102 +++++++++++++++++++++++------ libs/dyn/convert/normalize_test.go | 47 +++++++++++++ 2 files changed, 129 insertions(+), 20 deletions(-) diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 5595aae1e..26df09578 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -9,30 +9,51 @@ import ( "github.com/databricks/cli/libs/dyn" ) -func Normalize(dst any, src dyn.Value) (dyn.Value, diag.Diagnostics) { - return normalizeType(reflect.TypeOf(dst), src) +// NormalizeOption is the type for options that can be passed to Normalize. +type NormalizeOption int + +const ( + // IncludeMissingFields causes the normalization to include fields that defined on the given + // type but are missing in the source value. They are included with their zero values. + IncludeMissingFields NormalizeOption = iota +) + +type normalizeOptions struct { + includeMissingFields bool } -func normalizeType(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func Normalize(dst any, src dyn.Value, opts ...NormalizeOption) (dyn.Value, diag.Diagnostics) { + var n normalizeOptions + for _, opt := range opts { + switch opt { + case IncludeMissingFields: + n.includeMissingFields = true + } + } + + return n.normalizeType(reflect.TypeOf(dst), src) +} + +func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { for typ.Kind() == reflect.Pointer { typ = typ.Elem() } switch typ.Kind() { case reflect.Struct: - return normalizeStruct(typ, src) + return n.normalizeStruct(typ, src) case reflect.Map: - return normalizeMap(typ, src) + return n.normalizeMap(typ, src) case reflect.Slice: - return normalizeSlice(typ, src) + return n.normalizeSlice(typ, src) case reflect.String: - return normalizeString(typ, src) + return n.normalizeString(typ, src) case reflect.Bool: - return normalizeBool(typ, src) + return n.normalizeBool(typ, src) case reflect.Int, reflect.Int32, reflect.Int64: - return normalizeInt(typ, src) + return n.normalizeInt(typ, src) case reflect.Float32, reflect.Float64: - return normalizeFloat(typ, src) + return n.normalizeFloat(typ, src) } return dyn.InvalidValue, diag.Errorf("unsupported type: %s", typ.Kind()) @@ -46,7 +67,7 @@ func typeMismatch(expected dyn.Kind, src dyn.Value) diag.Diagnostic { } } -func normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -65,7 +86,7 @@ func normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnosti } // Normalize the value according to the field type. - v, err := normalizeType(typ.FieldByIndex(index).Type, v) + v, err := n.normalizeType(typ.FieldByIndex(index).Type, v) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. 
@@ -77,6 +98,47 @@ func normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnosti out[k] = v } + // Return the normalized value if missing fields are not included. + if !n.includeMissingFields { + return dyn.NewValue(out, src.Location()), diags + } + + // Populate missing fields with their zero values. + for k, index := range info.Fields { + if _, ok := out[k]; ok { + continue + } + + // Optionally dereference pointers to get the underlying field type. + ftyp := typ.FieldByIndex(index).Type + for ftyp.Kind() == reflect.Pointer { + ftyp = ftyp.Elem() + } + + var v dyn.Value + switch ftyp.Kind() { + case reflect.Struct, reflect.Map: + v, _ = n.normalizeType(ftyp, dyn.V(map[string]dyn.Value{})) + case reflect.Slice: + v, _ = n.normalizeType(ftyp, dyn.V([]dyn.Value{})) + case reflect.String: + v, _ = n.normalizeType(ftyp, dyn.V("")) + case reflect.Bool: + v, _ = n.normalizeType(ftyp, dyn.V(false)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, _ = n.normalizeType(ftyp, dyn.V(int64(0))) + case reflect.Float32, reflect.Float64: + v, _ = n.normalizeType(ftyp, dyn.V(float64(0))) + default: + // Skip fields for which we do not have a natural [dyn.Value] equivalent. + // For example, we don't handle reflect.Complex* and reflect.Uint* types. + continue + } + if v.IsValid() { + out[k] = v + } + } + return dyn.NewValue(out, src.Location()), diags case dyn.KindNil: return src, diags @@ -85,7 +147,7 @@ func normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnosti return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src)) } -func normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -93,7 +155,7 @@ func normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) out := make(map[string]dyn.Value) for k, v := range src.MustMap() { // Normalize the value according to the map element type. - v, err := normalizeType(typ.Elem(), v) + v, err := n.normalizeType(typ.Elem(), v) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. @@ -113,7 +175,7 @@ func normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src)) } -func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -121,7 +183,7 @@ func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostic out := make([]dyn.Value, 0, len(src.MustSequence())) for _, v := range src.MustSequence() { // Normalize the value according to the slice element type. - v, err := normalizeType(typ.Elem(), v) + v, err := n.normalizeType(typ.Elem(), v) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. 
@@ -141,7 +203,7 @@ func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostic return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindSequence, src)) } -func normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out string @@ -161,7 +223,7 @@ func normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnosti return dyn.NewValue(out, src.Location()), diags } -func normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out bool @@ -186,7 +248,7 @@ func normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics return dyn.NewValue(out, src.Location()), diags } -func normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out int64 @@ -210,7 +272,7 @@ func normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) return dyn.NewValue(out, src.Location()), diags } -func normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out float64 diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 702816155..d59cc3b35 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -142,6 +142,53 @@ func TestNormalizeStructNestedError(t *testing.T) { ) } +func TestNormalizeStructIncludeMissingFields(t *testing.T) { + type Nested struct { + String string `json:"string"` + } + + type Tmp struct { + // Verify that fields that are already set in the dynamic value are not overridden. + Existing string `json:"existing"` + + // Verify that structs are recursively normalized if not set. + Nested Nested `json:"nested"` + Ptr *Nested `json:"ptr"` + + // Verify that containers are also zero-initialized if not set. + Map map[string]string `json:"map"` + Slice []string `json:"slice"` + + // Verify that primitive types are zero-initialized if not set. 
+ String string `json:"string"` + Bool bool `json:"bool"` + Int int `json:"int"` + Float float64 `json:"float"` + } + + var typ Tmp + vin := dyn.V(map[string]dyn.Value{ + "existing": dyn.V("already set"), + }) + vout, err := Normalize(typ, vin, IncludeMissingFields) + assert.Empty(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "existing": dyn.V("already set"), + "nested": dyn.V(map[string]dyn.Value{ + "string": dyn.V(""), + }), + "ptr": dyn.V(map[string]dyn.Value{ + "string": dyn.V(""), + }), + "map": dyn.V(map[string]dyn.Value{}), + "slice": dyn.V([]dyn.Value{}), + "string": dyn.V(""), + "bool": dyn.V(false), + "int": dyn.V(int64(0)), + "float": dyn.V(float64(0)), + }), vout) +} + func TestNormalizeMap(t *testing.T) { var typ map[string]string vin := dyn.V(map[string]dyn.Value{ From 5063c48e83e16bb1b85142218e41cffa4cbf0810 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 15 Feb 2024 17:23:48 +0100 Subject: [PATCH 033/286] Trim trailing whitespace (#1206) --- LICENSE | 110 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/LICENSE b/LICENSE index dc30b4656..9878fb474 100644 --- a/LICENSE +++ b/LICENSE @@ -1,69 +1,69 @@ Databricks License Copyright (2022) Databricks, Inc. - Definitions. - - Agreement: The agreement between Databricks, Inc., and you governing - the use of the Databricks Services, as that term is defined in - the Master Cloud Services Agreement (MCSA) located at + Definitions. + + Agreement: The agreement between Databricks, Inc., and you governing + the use of the Databricks Services, as that term is defined in + the Master Cloud Services Agreement (MCSA) located at www.databricks.com/legal/mcsa. - - Licensed Materials: The source code, object code, data, and/or other - works to which this license applies. - Scope of Use. You may not use the Licensed Materials except in - connection with your use of the Databricks Services pursuant to - the Agreement. Your use of the Licensed Materials must comply at all - times with any restrictions applicable to the Databricks Services, - generally, and must be used in accordance with any applicable - documentation. You may view, use, copy, modify, publish, and/or - distribute the Licensed Materials solely for the purposes of using + Licensed Materials: The source code, object code, data, and/or other + works to which this license applies. + + Scope of Use. You may not use the Licensed Materials except in + connection with your use of the Databricks Services pursuant to + the Agreement. Your use of the Licensed Materials must comply at all + times with any restrictions applicable to the Databricks Services, + generally, and must be used in accordance with any applicable + documentation. You may view, use, copy, modify, publish, and/or + distribute the Licensed Materials solely for the purposes of using the Licensed Materials within or connecting to the Databricks Services. - If you do not agree to these terms, you may not view, use, copy, + If you do not agree to these terms, you may not view, use, copy, modify, publish, and/or distribute the Licensed Materials. - - Redistribution. You may redistribute and sublicense the Licensed - Materials so long as all use is in compliance with these terms. 
- In addition: - - - You must give any other recipients a copy of this License; - - You must cause any modified files to carry prominent notices - stating that you changed the files; - - You must retain, in any derivative works that you distribute, - all copyright, patent, trademark, and attribution notices, - excluding those notices that do not pertain to any part of - the derivative works; and - - If a "NOTICE" text file is provided as part of its - distribution, then any derivative works that you distribute - must include a readable copy of the attribution notices - contained within such NOTICE file, excluding those notices - that do not pertain to any part of the derivative works. - You may add your own copyright statement to your modifications and may - provide additional license terms and conditions for use, reproduction, - or distribution of your modifications, or for any such derivative works - as a whole, provided your use, reproduction, and distribution of - the Licensed Materials otherwise complies with the conditions stated + Redistribution. You may redistribute and sublicense the Licensed + Materials so long as all use is in compliance with these terms. + In addition: + + - You must give any other recipients a copy of this License; + - You must cause any modified files to carry prominent notices + stating that you changed the files; + - You must retain, in any derivative works that you distribute, + all copyright, patent, trademark, and attribution notices, + excluding those notices that do not pertain to any part of + the derivative works; and + - If a "NOTICE" text file is provided as part of its + distribution, then any derivative works that you distribute + must include a readable copy of the attribution notices + contained within such NOTICE file, excluding those notices + that do not pertain to any part of the derivative works. + + You may add your own copyright statement to your modifications and may + provide additional license terms and conditions for use, reproduction, + or distribution of your modifications, or for any such derivative works + as a whole, provided your use, reproduction, and distribution of + the Licensed Materials otherwise complies with the conditions stated in this License. - Termination. This license terminates automatically upon your breach of - these terms or upon the termination of your Agreement. Additionally, - Databricks may terminate this license at any time on notice. Upon - termination, you must permanently delete the Licensed Materials and + Termination. This license terminates automatically upon your breach of + these terms or upon the termination of your Agreement. Additionally, + Databricks may terminate this license at any time on notice. Upon + termination, you must permanently delete the Licensed Materials and all copies thereof. - DISCLAIMER; LIMITATION OF LIABILITY. + DISCLAIMER; LIMITATION OF LIABILITY. - THE LICENSED MATERIALS ARE PROVIDED “AS-IS” AND WITH ALL FAULTS. - DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY - DISCLAIMS ALL WARRANTIES RELATING TO THE LICENSED MATERIALS, EXPRESS - AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, - CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR - FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND - ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF - YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE LICENSED MATERIALS SHALL - BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. 
IN NO EVENT SHALL - THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - ARISING FROM, OUT OF OR IN CONNECTION WITH THE LICENSED MATERIALS OR + THE LICENSED MATERIALS ARE PROVIDED “AS-IS” AND WITH ALL FAULTS. + DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY + DISCLAIMS ALL WARRANTIES RELATING TO THE LICENSED MATERIALS, EXPRESS + AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, + CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR + FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND + ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF + YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE LICENSED MATERIALS SHALL + BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE LICENSED MATERIALS OR THE USE OR OTHER DEALINGS IN THE LICENSED MATERIALS. From 961d04d4f0dffc35205c1ea1f7ebec8e99492f5c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 15 Feb 2024 18:34:17 +0100 Subject: [PATCH 034/286] Release v0.213.0 (#1212) CLI: * Ignore environment variables for `auth profiles` ([#1189](https://github.com/databricks/cli/pull/1189)). * Update LICENSE file to match Databricks license language ([#1013](https://github.com/databricks/cli/pull/1013)). Bundles: * Added `bundle deployment bind` and `unbind` command ([#1131](https://github.com/databricks/cli/pull/1131)). * Use allowlist for Git-related fields to include in metadata ([#1187](https://github.com/databricks/cli/pull/1187)). * Added `--restart` flag for `bundle run` command ([#1191](https://github.com/databricks/cli/pull/1191)). * Generate correct YAML if `custom_tags` or `spark_conf` is used for pipeline or job cluster configuration ([#1210](https://github.com/databricks/cli/pull/1210)). Internal: * Move folders package into libs ([#1184](https://github.com/databricks/cli/pull/1184)). * Log time it takes for profile to load ([#1186](https://github.com/databricks/cli/pull/1186)). * Use mockery to generate mocks compatible with testify/mock ([#1190](https://github.com/databricks/cli/pull/1190)). * Retain partially valid structs in `convert.Normalize` ([#1203](https://github.com/databricks/cli/pull/1203)). * Skip `for_each_task` when generating the bundle schema ([#1204](https://github.com/databricks/cli/pull/1204)). * Regenerate the CLI using the same OpenAPI spec as the SDK ([#1205](https://github.com/databricks/cli/pull/1205)). * Avoid race-conditions while executing sub-commands ([#1201](https://github.com/databricks/cli/pull/1201)). API Changes: * Added `databricks tables exists` command. * Added `databricks lakehouse-monitors` command group. * Removed `databricks files get-status` command. * Added `databricks files create-directory` command. * Added `databricks files delete-directory` command. * Added `databricks files get-directory-metadata` command. * Added `databricks files get-metadata` command. * Added `databricks files list-directory-contents` command. * Removed `databricks pipelines reset` command. * Changed `databricks account settings delete-personal-compute-setting` command with new required argument order. * Removed `databricks account settings read-personal-compute-setting` command. 
* Changed `databricks account settings update-personal-compute-setting` command with new required argument order. * Added `databricks account settings get-personal-compute-setting` command. * Removed `databricks settings delete-default-workspace-namespace` command. * Removed `databricks settings read-default-workspace-namespace` command. * Removed `databricks settings update-default-workspace-namespace` command. * Added `databricks settings delete-default-namespace-setting` command. * Added `databricks settings delete-restrict-workspace-admins-setting` command. * Added `databricks settings get-default-namespace-setting` command. * Added `databricks settings get-restrict-workspace-admins-setting` command. * Added `databricks settings update-default-namespace-setting` command. * Added `databricks settings update-restrict-workspace-admins-setting` command. * Changed `databricks token-management create-obo-token` command with new required argument order. * Changed `databricks token-management get` command to return . * Changed `databricks dashboards create` command . New request type is . * Added `databricks dashboards update` command. OpenAPI commit c40670f5a2055c92cf0a6aac92a5bccebfb80866 (2024-02-14) Dependency updates: * Bump github.com/hashicorp/hc-install from 0.6.2 to 0.6.3 ([#1200](https://github.com/databricks/cli/pull/1200)). * Bump golang.org/x/term from 0.16.0 to 0.17.0 ([#1197](https://github.com/databricks/cli/pull/1197)). * Bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 ([#1198](https://github.com/databricks/cli/pull/1198)). * Bump github.com/databricks/databricks-sdk-go from 0.30.1 to 0.32.0 ([#1199](https://github.com/databricks/cli/pull/1199)). --------- Co-authored-by: Pieter Noordhuis --- CHANGELOG.md | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 037028c94..f68ceabca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,61 @@ # Version changelog +## 0.213.0 + +CLI: + * Ignore environment variables for `auth profiles` ([#1189](https://github.com/databricks/cli/pull/1189)). + * Update LICENSE file to match Databricks license language ([#1013](https://github.com/databricks/cli/pull/1013)). + +Bundles: + * Added `bundle deployment bind` and `unbind` command ([#1131](https://github.com/databricks/cli/pull/1131)). + * Use allowlist for Git-related fields to include in metadata ([#1187](https://github.com/databricks/cli/pull/1187)). + * Added `--restart` flag for `bundle run` command ([#1191](https://github.com/databricks/cli/pull/1191)). + * Generate correct YAML if `custom_tags` or `spark_conf` is used for pipeline or job cluster configuration ([#1210](https://github.com/databricks/cli/pull/1210)). + +Internal: + * Move folders package into libs ([#1184](https://github.com/databricks/cli/pull/1184)). + * Log time it takes for profile to load ([#1186](https://github.com/databricks/cli/pull/1186)). + * Use mockery to generate mocks compatible with testify/mock ([#1190](https://github.com/databricks/cli/pull/1190)). + * Retain partially valid structs in `convert.Normalize` ([#1203](https://github.com/databricks/cli/pull/1203)). + * Skip `for_each_task` when generating the bundle schema ([#1204](https://github.com/databricks/cli/pull/1204)). + * Regenerate the CLI using the same OpenAPI spec as the SDK ([#1205](https://github.com/databricks/cli/pull/1205)). + * Avoid race-conditions while executing sub-commands ([#1201](https://github.com/databricks/cli/pull/1201)). 
+ +API Changes: + * Added `databricks tables exists` command. + * Added `databricks lakehouse-monitors` command group. + * Removed `databricks files get-status` command. + * Added `databricks files create-directory` command. + * Added `databricks files delete-directory` command. + * Added `databricks files get-directory-metadata` command. + * Added `databricks files get-metadata` command. + * Added `databricks files list-directory-contents` command. + * Removed `databricks pipelines reset` command. + * Changed `databricks account settings delete-personal-compute-setting` command with new required argument order. + * Removed `databricks account settings read-personal-compute-setting` command. + * Changed `databricks account settings update-personal-compute-setting` command with new required argument order. + * Added `databricks account settings get-personal-compute-setting` command. + * Removed `databricks settings delete-default-workspace-namespace` command. + * Removed `databricks settings read-default-workspace-namespace` command. + * Removed `databricks settings update-default-workspace-namespace` command. + * Added `databricks settings delete-default-namespace-setting` command. + * Added `databricks settings delete-restrict-workspace-admins-setting` command. + * Added `databricks settings get-default-namespace-setting` command. + * Added `databricks settings get-restrict-workspace-admins-setting` command. + * Added `databricks settings update-default-namespace-setting` command. + * Added `databricks settings update-restrict-workspace-admins-setting` command. + * Changed `databricks token-management create-obo-token` command with new required argument order. + * Changed `databricks token-management get` command to return . + * Changed `databricks dashboards create` command . New request type is . + * Added `databricks dashboards update` command. + +OpenAPI commit c40670f5a2055c92cf0a6aac92a5bccebfb80866 (2024-02-14) +Dependency updates: + * Bump github.com/hashicorp/hc-install from 0.6.2 to 0.6.3 ([#1200](https://github.com/databricks/cli/pull/1200)). + * Bump golang.org/x/term from 0.16.0 to 0.17.0 ([#1197](https://github.com/databricks/cli/pull/1197)). + * Bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 ([#1198](https://github.com/databricks/cli/pull/1198)). + * Bump github.com/databricks/databricks-sdk-go from 0.30.1 to 0.32.0 ([#1199](https://github.com/databricks/cli/pull/1199)). + ## 0.212.4 Bundles: From ed00c85843341f9b0c338685d55c85f6fc815d10 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Fri, 16 Feb 2024 07:35:52 +0100 Subject: [PATCH 035/286] Add fork-user to winget release workflow (#1214) ## Changes From https://github.com/vedantmgoyal2009/winget-releaser: > If you are forking [winget-pkgs](https://github.com/microsoft/winget-pkgs) on a different account (e.g. bot/personal account), you can use the fork-user input to specify the username of the account where the fork is present. This PR makes that change. 
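
For reference, a minimal sketch of the resulting release step after this change, based on the context lines in the diff below (the `uses:` reference and its version pin are illustrative assumptions; only the `fork-user` line is new):

```yaml
      # Sketch of the winget publishing step; the action reference/version is assumed.
      - uses: vedantmgoyal2009/winget-releaser@main
        with:
          identifier: Databricks.DatabricksCLI
          installers-regex: 'windows_.*\.zip$' # Only windows releases
          token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }}
          fork-user: eng-dev-ecosystem-bot
```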
## Tests --- .github/workflows/release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 044324edc..43ceea2cd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -128,3 +128,4 @@ jobs: identifier: Databricks.DatabricksCLI installers-regex: 'windows_.*\.zip$' # Only windows releases token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }} + fork-user: eng-dev-ecosystem-bot From ffae10d9045547124bd08cb7a89facd2b27155cf Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 16 Feb 2024 08:05:45 +0100 Subject: [PATCH 036/286] Bump Terraform provider to v1.36.2 (#1215) ## Changes * Update `go.mod` with latest dependencies * Update `go.mod` to require Go 1.21 to match root `go.mod` * Regenerate structs for Terraform provider v1.36.2 ## Tests n/a --- bundle/internal/tf/codegen/go.mod | 22 +- bundle/internal/tf/codegen/go.sum | 239 ++++-------------- bundle/internal/tf/codegen/schema/version.go | 2 +- .../data_source_aws_unity_catalog_policy.go | 12 + .../internal/tf/schema/data_source_cluster.go | 8 +- .../schema/data_source_current_metastore.go | 29 +++ .../tf/schema/data_source_directory.go | 7 +- bundle/internal/tf/schema/data_source_job.go | 28 +- .../tf/schema/data_source_sql_warehouse.go | 32 ++- .../internal/tf/schema/data_source_volumes.go | 10 + bundle/internal/tf/schema/data_sources.go | 6 + bundle/internal/tf/schema/resource_cluster.go | 23 +- .../internal/tf/schema/resource_directory.go | 1 + bundle/internal/tf/schema/resource_grant.go | 22 ++ bundle/internal/tf/schema/resource_grants.go | 2 - bundle/internal/tf/schema/resource_job.go | 28 +- .../schema/resource_metastore_data_access.go | 2 + .../tf/schema/resource_model_serving.go | 29 ++- .../resource_mws_private_access_settings.go | 1 - .../tf/schema/resource_mws_workspaces.go | 1 + .../internal/tf/schema/resource_pipeline.go | 8 +- bundle/internal/tf/schema/resource_repo.go | 1 + .../tf/schema/resource_sql_endpoint.go | 50 ++-- .../tf/schema/resource_storage_credential.go | 2 + bundle/internal/tf/schema/resources.go | 2 + bundle/internal/tf/schema/root.go | 2 +- 26 files changed, 285 insertions(+), 284 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go create mode 100644 bundle/internal/tf/schema/data_source_current_metastore.go create mode 100644 bundle/internal/tf/schema/data_source_volumes.go create mode 100644 bundle/internal/tf/schema/resource_grant.go diff --git a/bundle/internal/tf/codegen/go.mod b/bundle/internal/tf/codegen/go.mod index 7820cb705..67ac4bbc7 100644 --- a/bundle/internal/tf/codegen/go.mod +++ b/bundle/internal/tf/codegen/go.mod @@ -1,20 +1,24 @@ module github.com/databricks/cli/bundle/internal/tf/codegen -go 1.18 +go 1.21 require ( github.com/hashicorp/go-version v1.6.0 - github.com/hashicorp/hc-install v0.5.0 - github.com/hashicorp/terraform-exec v0.17.3 - github.com/hashicorp/terraform-json v0.15.0 - github.com/iancoleman/strcase v0.2.0 - github.com/zclconf/go-cty v1.12.1 - golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb + github.com/hashicorp/hc-install v0.6.3 + github.com/hashicorp/terraform-exec v0.20.0 + github.com/hashicorp/terraform-json v0.21.0 + github.com/iancoleman/strcase v0.3.0 + github.com/zclconf/go-cty v1.14.2 + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a ) require ( + github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect 
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/mod v0.8.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect ) diff --git a/bundle/internal/tf/codegen/go.sum b/bundle/internal/tf/codegen/go.sum index 3ebd90ccb..7a4023ba5 100644 --- a/bundle/internal/tf/codegen/go.sum +++ b/bundle/internal/tf/codegen/go.sum @@ -1,195 +1,68 @@ -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= 
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.5.0 h1:D9bl4KayIYKEeJ4vUDe9L5huqxZXczKaykSRcmQ0xY0= -github.com/hashicorp/hc-install v0.5.0/go.mod h1:JyzMfbzfSBSjoDCRPna1vi/24BEDxFaCPfdHtM5SCdo= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= -github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.15.0 h1:/gIyNtR6SFw6h5yzlbDbACyGvIhKtQi8mTsbkNd79lE= -github.com/hashicorp/terraform-json v0.15.0/go.mod h1:+L1RNzjDU5leLFZkHTFTbJXaoqUC6TqXlFgDoOXrtvk= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= +github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= +github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= +github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= +github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= +github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= 
-github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod 
h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= +github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= 
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index d141592a8..c79319eda 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.31.1" +const ProviderVersion = "1.36.2" diff --git a/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go b/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go new file mode 100644 index 000000000..2832bdf72 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go @@ -0,0 +1,12 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceAwsUnityCatalogPolicy struct { + AwsAccountId string `json:"aws_account_id"` + BucketName string `json:"bucket_name"` + Id string `json:"id,omitempty"` + Json string `json:"json,omitempty"` + KmsName string `json:"kms_name,omitempty"` + RoleName string `json:"role_name"` +} diff --git a/bundle/internal/tf/schema/data_source_cluster.go b/bundle/internal/tf/schema/data_source_cluster.go index ce1ad034c..d34d63a79 100644 --- a/bundle/internal/tf/schema/data_source_cluster.go +++ b/bundle/internal/tf/schema/data_source_cluster.go @@ -96,7 +96,7 @@ type DataSourceClusterClusterInfoGcpAttributes struct { } type DataSourceClusterClusterInfoInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScriptsDbfs struct { @@ -104,11 +104,11 @@ type DataSourceClusterClusterInfoInitScriptsDbfs struct { } type DataSourceClusterClusterInfoInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScriptsS3 struct { @@ -126,7 +126,7 @@ type DataSourceClusterClusterInfoInitScriptsVolumes struct { } type DataSourceClusterClusterInfoInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScripts struct { diff --git a/bundle/internal/tf/schema/data_source_current_metastore.go b/bundle/internal/tf/schema/data_source_current_metastore.go new file mode 100644 index 000000000..11e647fd3 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_current_metastore.go @@ -0,0 +1,29 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceCurrentMetastoreMetastoreInfo struct { + Cloud string `json:"cloud,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DefaultDataAccessConfigId string `json:"default_data_access_config_id,omitempty"` + DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"` + DeltaSharingRecipientTokenLifetimeInSeconds int `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"` + DeltaSharingScope string `json:"delta_sharing_scope,omitempty"` + GlobalMetastoreId string `json:"global_metastore_id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + PrivilegeModelVersion string `json:"privilege_model_version,omitempty"` + Region string `json:"region,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` + StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"` + StorageRootCredentialName string `json:"storage_root_credential_name,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` +} + +type DataSourceCurrentMetastore struct { + Id string `json:"id,omitempty"` + MetastoreInfo *DataSourceCurrentMetastoreMetastoreInfo `json:"metastore_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_directory.go b/bundle/internal/tf/schema/data_source_directory.go index 6841b6074..555c8d756 100644 --- a/bundle/internal/tf/schema/data_source_directory.go +++ b/bundle/internal/tf/schema/data_source_directory.go @@ -3,7 +3,8 @@ package schema type DataSourceDirectory struct { - Id string `json:"id,omitempty"` - ObjectId int `json:"object_id,omitempty"` - Path string `json:"path"` + Id string `json:"id,omitempty"` + ObjectId int `json:"object_id,omitempty"` + Path string `json:"path"` + WorkspacePath string `json:"workspace_path,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index 75d3672bc..f9a316d78 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -134,7 +134,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterGcpAttributes struct { } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsDbfs struct { @@ -142,11 +142,11 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsDbfs struct } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 struct { @@ -164,7 +164,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsVolumes stru } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScripts struct { @@ -321,7 +321,7 @@ type DataSourceJobJobSettingsSettingsNewClusterGcpAttributes 
struct { } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsDbfs struct { @@ -329,11 +329,11 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScriptsDbfs struct { } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 struct { @@ -351,7 +351,7 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScriptsVolumes struct { } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScripts struct { @@ -418,8 +418,8 @@ type DataSourceJobJobSettingsSettingsNotificationSettings struct { } type DataSourceJobJobSettingsSettingsParameter struct { - Default string `json:"default,omitempty"` - Name string `json:"name,omitempty"` + Default string `json:"default"` + Name string `json:"name"` } type DataSourceJobJobSettingsSettingsPipelineTask struct { @@ -604,7 +604,7 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterGcpAttributes struct { } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsDbfs struct { @@ -612,11 +612,11 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsDbfs struct { } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 struct { @@ -634,7 +634,7 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsVolumes struct { } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScripts struct { diff --git a/bundle/internal/tf/schema/data_source_sql_warehouse.go b/bundle/internal/tf/schema/data_source_sql_warehouse.go index 218591d09..05212f0bd 100644 --- a/bundle/internal/tf/schema/data_source_sql_warehouse.go +++ b/bundle/internal/tf/schema/data_source_sql_warehouse.go @@ -3,20 +3,34 @@ package schema type DataSourceSqlWarehouseChannel struct { - Name string `json:"name,omitempty"` + DbsqlVersion string `json:"dbsql_version,omitempty"` + Name string `json:"name,omitempty"` +} + +type DataSourceSqlWarehouseHealthFailureReason struct { + Code string `json:"code,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` + Type string `json:"type,omitempty"` +} + +type DataSourceSqlWarehouseHealth struct { + Details string `json:"details,omitempty"` + Message string `json:"message,omitempty"` + Status string `json:"status,omitempty"` + Summary string 
`json:"summary,omitempty"` + FailureReason *DataSourceSqlWarehouseHealthFailureReason `json:"failure_reason,omitempty"` } type DataSourceSqlWarehouseOdbcParams struct { - Host string `json:"host,omitempty"` Hostname string `json:"hostname,omitempty"` - Path string `json:"path"` - Port int `json:"port"` - Protocol string `json:"protocol"` + Path string `json:"path,omitempty"` + Port int `json:"port,omitempty"` + Protocol string `json:"protocol,omitempty"` } type DataSourceSqlWarehouseTagsCustomTags struct { - Key string `json:"key"` - Value string `json:"value"` + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` } type DataSourceSqlWarehouseTags struct { @@ -26,6 +40,7 @@ type DataSourceSqlWarehouseTags struct { type DataSourceSqlWarehouse struct { AutoStopMins int `json:"auto_stop_mins,omitempty"` ClusterSize string `json:"cluster_size,omitempty"` + CreatorName string `json:"creator_name,omitempty"` DataSourceId string `json:"data_source_id,omitempty"` EnablePhoton bool `json:"enable_photon,omitempty"` EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` @@ -35,10 +50,13 @@ type DataSourceSqlWarehouse struct { MaxNumClusters int `json:"max_num_clusters,omitempty"` MinNumClusters int `json:"min_num_clusters,omitempty"` Name string `json:"name,omitempty"` + NumActiveSessions int `json:"num_active_sessions,omitempty"` NumClusters int `json:"num_clusters,omitempty"` SpotInstancePolicy string `json:"spot_instance_policy,omitempty"` State string `json:"state,omitempty"` + WarehouseType string `json:"warehouse_type,omitempty"` Channel *DataSourceSqlWarehouseChannel `json:"channel,omitempty"` + Health *DataSourceSqlWarehouseHealth `json:"health,omitempty"` OdbcParams *DataSourceSqlWarehouseOdbcParams `json:"odbc_params,omitempty"` Tags *DataSourceSqlWarehouseTags `json:"tags,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_volumes.go b/bundle/internal/tf/schema/data_source_volumes.go new file mode 100644 index 000000000..07bf59338 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_volumes.go @@ -0,0 +1,10 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceVolumes struct { + CatalogName string `json:"catalog_name"` + Id string `json:"id,omitempty"` + Ids []string `json:"ids,omitempty"` + SchemaName string `json:"schema_name"` +} diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index c61ab9096..7c48a8471 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -6,11 +6,13 @@ type DataSources struct { AwsAssumeRolePolicy map[string]*DataSourceAwsAssumeRolePolicy `json:"databricks_aws_assume_role_policy,omitempty"` AwsBucketPolicy map[string]*DataSourceAwsBucketPolicy `json:"databricks_aws_bucket_policy,omitempty"` AwsCrossaccountPolicy map[string]*DataSourceAwsCrossaccountPolicy `json:"databricks_aws_crossaccount_policy,omitempty"` + AwsUnityCatalogPolicy map[string]*DataSourceAwsUnityCatalogPolicy `json:"databricks_aws_unity_catalog_policy,omitempty"` Catalogs map[string]*DataSourceCatalogs `json:"databricks_catalogs,omitempty"` Cluster map[string]*DataSourceCluster `json:"databricks_cluster,omitempty"` ClusterPolicy map[string]*DataSourceClusterPolicy `json:"databricks_cluster_policy,omitempty"` Clusters map[string]*DataSourceClusters `json:"databricks_clusters,omitempty"` CurrentConfig map[string]*DataSourceCurrentConfig `json:"databricks_current_config,omitempty"` + CurrentMetastore map[string]*DataSourceCurrentMetastore `json:"databricks_current_metastore,omitempty"` CurrentUser map[string]*DataSourceCurrentUser `json:"databricks_current_user,omitempty"` DbfsFile map[string]*DataSourceDbfsFile `json:"databricks_dbfs_file,omitempty"` DbfsFilePaths map[string]*DataSourceDbfsFilePaths `json:"databricks_dbfs_file_paths,omitempty"` @@ -40,6 +42,7 @@ type DataSources struct { Tables map[string]*DataSourceTables `json:"databricks_tables,omitempty"` User map[string]*DataSourceUser `json:"databricks_user,omitempty"` Views map[string]*DataSourceViews `json:"databricks_views,omitempty"` + Volumes map[string]*DataSourceVolumes `json:"databricks_volumes,omitempty"` Zones map[string]*DataSourceZones `json:"databricks_zones,omitempty"` } @@ -48,11 +51,13 @@ func NewDataSources() *DataSources { AwsAssumeRolePolicy: make(map[string]*DataSourceAwsAssumeRolePolicy), AwsBucketPolicy: make(map[string]*DataSourceAwsBucketPolicy), AwsCrossaccountPolicy: make(map[string]*DataSourceAwsCrossaccountPolicy), + AwsUnityCatalogPolicy: make(map[string]*DataSourceAwsUnityCatalogPolicy), Catalogs: make(map[string]*DataSourceCatalogs), Cluster: make(map[string]*DataSourceCluster), ClusterPolicy: make(map[string]*DataSourceClusterPolicy), Clusters: make(map[string]*DataSourceClusters), CurrentConfig: make(map[string]*DataSourceCurrentConfig), + CurrentMetastore: make(map[string]*DataSourceCurrentMetastore), CurrentUser: make(map[string]*DataSourceCurrentUser), DbfsFile: make(map[string]*DataSourceDbfsFile), DbfsFilePaths: make(map[string]*DataSourceDbfsFilePaths), @@ -82,6 +87,7 @@ func NewDataSources() *DataSources { Tables: make(map[string]*DataSourceTables), User: make(map[string]*DataSourceUser), Views: make(map[string]*DataSourceViews), + Volumes: make(map[string]*DataSourceVolumes), Zones: make(map[string]*DataSourceZones), } } diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 1d5a5ef25..1a73b35a4 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -10,7 +10,9 @@ type ResourceClusterAutoscale struct { type 
ResourceClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -18,10 +20,16 @@ type ResourceClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceClusterClusterLogConfDbfs struct { @@ -74,7 +82,7 @@ type ResourceClusterGcpAttributes struct { } type ResourceClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScriptsDbfs struct { @@ -86,7 +94,7 @@ type ResourceClusterInitScriptsFile struct { } type ResourceClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScriptsS3 struct { @@ -100,11 +108,11 @@ type ResourceClusterInitScriptsS3 struct { } type ResourceClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScripts struct { @@ -156,6 +164,7 @@ type ResourceCluster struct { AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` + ClusterSource string `json:"cluster_source,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` DataSecurityMode string `json:"data_security_mode,omitempty"` DefaultTags map[string]string `json:"default_tags,omitempty"` diff --git a/bundle/internal/tf/schema/resource_directory.go b/bundle/internal/tf/schema/resource_directory.go index f418edded..ee7cf1607 100644 --- a/bundle/internal/tf/schema/resource_directory.go +++ b/bundle/internal/tf/schema/resource_directory.go @@ -7,4 +7,5 @@ type ResourceDirectory struct { Id string `json:"id,omitempty"` ObjectId int `json:"object_id,omitempty"` Path string `json:"path"` + WorkspacePath string `json:"workspace_path,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_grant.go b/bundle/internal/tf/schema/resource_grant.go new file mode 100644 index 000000000..d8569f304 --- /dev/null +++ b/bundle/internal/tf/schema/resource_grant.go @@ -0,0 +1,22 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceGrant struct { + Catalog string `json:"catalog,omitempty"` + ExternalLocation string `json:"external_location,omitempty"` + ForeignConnection string `json:"foreign_connection,omitempty"` + Function string `json:"function,omitempty"` + Id string `json:"id,omitempty"` + Metastore string `json:"metastore,omitempty"` + Model string `json:"model,omitempty"` + Pipeline string `json:"pipeline,omitempty"` + Principal string `json:"principal"` + Privileges []string `json:"privileges"` + Recipient string `json:"recipient,omitempty"` + Schema string `json:"schema,omitempty"` + Share string `json:"share,omitempty"` + StorageCredential string `json:"storage_credential,omitempty"` + Table string `json:"table,omitempty"` + Volume string `json:"volume,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_grants.go b/bundle/internal/tf/schema/resource_grants.go index 09b958f82..22861005f 100644 --- a/bundle/internal/tf/schema/resource_grants.go +++ b/bundle/internal/tf/schema/resource_grants.go @@ -13,14 +13,12 @@ type ResourceGrants struct { ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` Id string `json:"id,omitempty"` - MaterializedView string `json:"materialized_view,omitempty"` Metastore string `json:"metastore,omitempty"` Model string `json:"model,omitempty"` Schema string `json:"schema,omitempty"` Share string `json:"share,omitempty"` StorageCredential string `json:"storage_credential,omitempty"` Table string `json:"table,omitempty"` - View string `json:"view,omitempty"` Volume string `json:"volume,omitempty"` Grant []ResourceGrantsGrant `json:"grant,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 7884efd79..96c0c2970 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -134,7 +134,7 @@ type ResourceJobJobClusterNewClusterGcpAttributes struct { } type ResourceJobJobClusterNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScriptsDbfs struct { @@ -142,11 +142,11 @@ type ResourceJobJobClusterNewClusterInitScriptsDbfs struct { } type ResourceJobJobClusterNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScriptsS3 struct { @@ -164,7 +164,7 @@ type ResourceJobJobClusterNewClusterInitScriptsVolumes struct { } type ResourceJobJobClusterNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScripts struct { @@ -321,7 +321,7 @@ type ResourceJobNewClusterGcpAttributes struct { } type ResourceJobNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScriptsDbfs struct { @@ -329,11 +329,11 @@ type ResourceJobNewClusterInitScriptsDbfs struct { } type ResourceJobNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScriptsGcs struct { - Destination string 
`json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScriptsS3 struct { @@ -351,7 +351,7 @@ type ResourceJobNewClusterInitScriptsVolumes struct { } type ResourceJobNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScripts struct { @@ -418,8 +418,8 @@ type ResourceJobNotificationSettings struct { } type ResourceJobParameter struct { - Default string `json:"default,omitempty"` - Name string `json:"name,omitempty"` + Default string `json:"default"` + Name string `json:"name"` } type ResourceJobPipelineTask struct { @@ -604,7 +604,7 @@ type ResourceJobTaskNewClusterGcpAttributes struct { } type ResourceJobTaskNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScriptsDbfs struct { @@ -612,11 +612,11 @@ type ResourceJobTaskNewClusterInitScriptsDbfs struct { } type ResourceJobTaskNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScriptsS3 struct { @@ -634,7 +634,7 @@ type ResourceJobTaskNewClusterInitScriptsVolumes struct { } type ResourceJobTaskNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScripts struct { diff --git a/bundle/internal/tf/schema/resource_metastore_data_access.go b/bundle/internal/tf/schema/resource_metastore_data_access.go index ec1395f71..155730055 100644 --- a/bundle/internal/tf/schema/resource_metastore_data_access.go +++ b/bundle/internal/tf/schema/resource_metastore_data_access.go @@ -34,12 +34,14 @@ type ResourceMetastoreDataAccessGcpServiceAccountKey struct { type ResourceMetastoreDataAccess struct { Comment string `json:"comment,omitempty"` ForceDestroy bool `json:"force_destroy,omitempty"` + ForceUpdate bool `json:"force_update,omitempty"` Id string `json:"id,omitempty"` IsDefault bool `json:"is_default,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name"` Owner string `json:"owner,omitempty"` ReadOnly bool `json:"read_only,omitempty"` + SkipValidation bool `json:"skip_validation,omitempty"` AwsIamRole *ResourceMetastoreDataAccessAwsIamRole `json:"aws_iam_role,omitempty"` AzureManagedIdentity *ResourceMetastoreDataAccessAzureManagedIdentity `json:"azure_managed_identity,omitempty"` AzureServicePrincipal *ResourceMetastoreDataAccessAzureServicePrincipal `json:"azure_service_principal,omitempty"` diff --git a/bundle/internal/tf/schema/resource_model_serving.go b/bundle/internal/tf/schema/resource_model_serving.go index b0cabbe5a..68265d9c0 100644 --- a/bundle/internal/tf/schema/resource_model_serving.go +++ b/bundle/internal/tf/schema/resource_model_serving.go @@ -2,6 +2,13 @@ package schema +type ResourceModelServingConfigAutoCaptureConfig struct { + CatalogName string `json:"catalog_name,omitempty"` + Enabled bool `json:"enabled,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + TableNamePrefix string `json:"table_name_prefix,omitempty"` +} + type ResourceModelServingConfigServedModels struct { EnvironmentVars map[string]string `json:"environment_vars,omitempty"` 
InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -23,8 +30,15 @@ type ResourceModelServingConfigTrafficConfig struct { } type ResourceModelServingConfig struct { - ServedModels []ResourceModelServingConfigServedModels `json:"served_models,omitempty"` - TrafficConfig *ResourceModelServingConfigTrafficConfig `json:"traffic_config,omitempty"` + AutoCaptureConfig *ResourceModelServingConfigAutoCaptureConfig `json:"auto_capture_config,omitempty"` + ServedModels []ResourceModelServingConfigServedModels `json:"served_models,omitempty"` + TrafficConfig *ResourceModelServingConfigTrafficConfig `json:"traffic_config,omitempty"` +} + +type ResourceModelServingRateLimits struct { + Calls int `json:"calls"` + Key string `json:"key,omitempty"` + RenewalPeriod string `json:"renewal_period"` } type ResourceModelServingTags struct { @@ -33,9 +47,10 @@ type ResourceModelServingTags struct { } type ResourceModelServing struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - ServingEndpointId string `json:"serving_endpoint_id,omitempty"` - Config *ResourceModelServingConfig `json:"config,omitempty"` - Tags []ResourceModelServingTags `json:"tags,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + ServingEndpointId string `json:"serving_endpoint_id,omitempty"` + Config *ResourceModelServingConfig `json:"config,omitempty"` + RateLimits []ResourceModelServingRateLimits `json:"rate_limits,omitempty"` + Tags []ResourceModelServingTags `json:"tags,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_mws_private_access_settings.go b/bundle/internal/tf/schema/resource_mws_private_access_settings.go index 2c9bdfeca..c7c40aabf 100644 --- a/bundle/internal/tf/schema/resource_mws_private_access_settings.go +++ b/bundle/internal/tf/schema/resource_mws_private_access_settings.go @@ -11,5 +11,4 @@ type ResourceMwsPrivateAccessSettings struct { PrivateAccessSettingsName string `json:"private_access_settings_name"` PublicAccessEnabled bool `json:"public_access_enabled,omitempty"` Region string `json:"region"` - Status string `json:"status,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_mws_workspaces.go b/bundle/internal/tf/schema/resource_mws_workspaces.go index 83d0ab909..21d1ce428 100644 --- a/bundle/internal/tf/schema/resource_mws_workspaces.go +++ b/bundle/internal/tf/schema/resource_mws_workspaces.go @@ -40,6 +40,7 @@ type ResourceMwsWorkspaces struct { Cloud string `json:"cloud,omitempty"` CreationTime int `json:"creation_time,omitempty"` CredentialsId string `json:"credentials_id,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` CustomerManagedKeyId string `json:"customer_managed_key_id,omitempty"` DeploymentName string `json:"deployment_name,omitempty"` Id string `json:"id,omitempty"` diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 72354f621..8737985c9 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -52,7 +52,7 @@ type ResourcePipelineClusterGcpAttributes struct { } type ResourcePipelineClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScriptsDbfs struct { @@ -60,11 +60,11 @@ type ResourcePipelineClusterInitScriptsDbfs struct { } type ResourcePipelineClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string 
`json:"destination"` } type ResourcePipelineClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScriptsS3 struct { @@ -82,7 +82,7 @@ type ResourcePipelineClusterInitScriptsVolumes struct { } type ResourcePipelineClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScripts struct { diff --git a/bundle/internal/tf/schema/resource_repo.go b/bundle/internal/tf/schema/resource_repo.go index 583ab097a..6f2945072 100644 --- a/bundle/internal/tf/schema/resource_repo.go +++ b/bundle/internal/tf/schema/resource_repo.go @@ -14,5 +14,6 @@ type ResourceRepo struct { Path string `json:"path,omitempty"` Tag string `json:"tag,omitempty"` Url string `json:"url"` + WorkspacePath string `json:"workspace_path,omitempty"` SparseCheckout *ResourceRepoSparseCheckout `json:"sparse_checkout,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_sql_endpoint.go b/bundle/internal/tf/schema/resource_sql_endpoint.go index c48261b96..b49a7cff5 100644 --- a/bundle/internal/tf/schema/resource_sql_endpoint.go +++ b/bundle/internal/tf/schema/resource_sql_endpoint.go @@ -3,15 +3,8 @@ package schema type ResourceSqlEndpointChannel struct { - Name string `json:"name,omitempty"` -} - -type ResourceSqlEndpointOdbcParams struct { - Host string `json:"host,omitempty"` - Hostname string `json:"hostname,omitempty"` - Path string `json:"path"` - Port int `json:"port"` - Protocol string `json:"protocol"` + DbsqlVersion string `json:"dbsql_version,omitempty"` + Name string `json:"name,omitempty"` } type ResourceSqlEndpointTagsCustomTags struct { @@ -24,22 +17,25 @@ type ResourceSqlEndpointTags struct { } type ResourceSqlEndpoint struct { - AutoStopMins int `json:"auto_stop_mins,omitempty"` - ClusterSize string `json:"cluster_size"` - DataSourceId string `json:"data_source_id,omitempty"` - EnablePhoton bool `json:"enable_photon,omitempty"` - EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` - Id string `json:"id,omitempty"` - InstanceProfileArn string `json:"instance_profile_arn,omitempty"` - JdbcUrl string `json:"jdbc_url,omitempty"` - MaxNumClusters int `json:"max_num_clusters,omitempty"` - MinNumClusters int `json:"min_num_clusters,omitempty"` - Name string `json:"name"` - NumClusters int `json:"num_clusters,omitempty"` - SpotInstancePolicy string `json:"spot_instance_policy,omitempty"` - State string `json:"state,omitempty"` - WarehouseType string `json:"warehouse_type,omitempty"` - Channel *ResourceSqlEndpointChannel `json:"channel,omitempty"` - OdbcParams *ResourceSqlEndpointOdbcParams `json:"odbc_params,omitempty"` - Tags *ResourceSqlEndpointTags `json:"tags,omitempty"` + AutoStopMins int `json:"auto_stop_mins,omitempty"` + ClusterSize string `json:"cluster_size"` + CreatorName string `json:"creator_name,omitempty"` + DataSourceId string `json:"data_source_id,omitempty"` + EnablePhoton bool `json:"enable_photon,omitempty"` + EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` + Health []any `json:"health,omitempty"` + Id string `json:"id,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + JdbcUrl string `json:"jdbc_url,omitempty"` + MaxNumClusters int `json:"max_num_clusters,omitempty"` + MinNumClusters int `json:"min_num_clusters,omitempty"` + Name string `json:"name"` + NumActiveSessions int `json:"num_active_sessions,omitempty"` 
+	NumClusters int `json:"num_clusters,omitempty"`
+	OdbcParams []any `json:"odbc_params,omitempty"`
+	SpotInstancePolicy string `json:"spot_instance_policy,omitempty"`
+	State string `json:"state,omitempty"`
+	WarehouseType string `json:"warehouse_type,omitempty"`
+	Channel *ResourceSqlEndpointChannel `json:"channel,omitempty"`
+	Tags *ResourceSqlEndpointTags `json:"tags,omitempty"`
 }
diff --git a/bundle/internal/tf/schema/resource_storage_credential.go b/bundle/internal/tf/schema/resource_storage_credential.go
index 1f103023d..3d4a501ea 100644
--- a/bundle/internal/tf/schema/resource_storage_credential.go
+++ b/bundle/internal/tf/schema/resource_storage_credential.go
@@ -34,11 +34,13 @@ type ResourceStorageCredentialGcpServiceAccountKey struct {
 type ResourceStorageCredential struct {
 	Comment string `json:"comment,omitempty"`
 	ForceDestroy bool `json:"force_destroy,omitempty"`
+	ForceUpdate bool `json:"force_update,omitempty"`
 	Id string `json:"id,omitempty"`
 	MetastoreId string `json:"metastore_id,omitempty"`
 	Name string `json:"name"`
 	Owner string `json:"owner,omitempty"`
 	ReadOnly bool `json:"read_only,omitempty"`
+	SkipValidation bool `json:"skip_validation,omitempty"`
 	AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"`
 	AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"`
 	AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"`
diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go
index 4519a5686..0a468ba92 100644
--- a/bundle/internal/tf/schema/resources.go
+++ b/bundle/internal/tf/schema/resources.go
@@ -21,6 +21,7 @@ type Resources struct {
 	ExternalLocation map[string]*ResourceExternalLocation `json:"databricks_external_location,omitempty"`
 	GitCredential map[string]*ResourceGitCredential `json:"databricks_git_credential,omitempty"`
 	GlobalInitScript map[string]*ResourceGlobalInitScript `json:"databricks_global_init_script,omitempty"`
+	Grant map[string]*ResourceGrant `json:"databricks_grant,omitempty"`
 	Grants map[string]*ResourceGrants `json:"databricks_grants,omitempty"`
 	Group map[string]*ResourceGroup `json:"databricks_group,omitempty"`
 	GroupInstanceProfile map[string]*ResourceGroupInstanceProfile `json:"databricks_group_instance_profile,omitempty"`
@@ -106,6 +107,7 @@ func NewResources() *Resources {
 		ExternalLocation: make(map[string]*ResourceExternalLocation),
 		GitCredential: make(map[string]*ResourceGitCredential),
 		GlobalInitScript: make(map[string]*ResourceGlobalInitScript),
+		Grant: make(map[string]*ResourceGrant),
 		Grants: make(map[string]*ResourceGrants),
 		Group: make(map[string]*ResourceGroup),
 		GroupInstanceProfile: make(map[string]*ResourceGroupInstanceProfile),
diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go
index 937182d4d..963ae1460 100644
--- a/bundle/internal/tf/schema/root.go
+++ b/bundle/internal/tf/schema/root.go
@@ -25,7 +25,7 @@ func NewRoot() *Root {
 			"required_providers": map[string]interface{}{
 				"databricks": map[string]interface{}{
 					"source": "databricks/databricks",
-					"version": "1.31.1",
+					"version": "1.36.2",
 				},
 			},
 		},

From 788ec8178537ca6eeca00386a7952c0d063f7e15 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Fri, 16 Feb 2024 13:46:24 +0100
Subject: [PATCH 037/286] Use `any` as type for data sources and resources in `tf/schema` (#1216)

## Changes

We plan to use the any-equivalent of a `dyn.Value` such that we can use
variable references for non-string fields (e.g. `${databricks_job.some_job.id}`
where an integer is expected), as well as properly emit zero values for
primitive types (e.g. 0 for integers or false for booleans).

This change is in preparation for the above.
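As a minimal sketch of what this enables (the `Resources` type is simplified here and the job name and field values are made up, not taken from this change), a map typed as `map[string]any` can hold a plain map whose fields carry a `${...}` reference string even where the typed schema declares an integer:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the generated Resources struct.
type Resources struct {
	Job map[string]any `json:"databricks_job,omitempty"`
}

func main() {
	r := Resources{
		Job: map[string]any{
			// A raw map can carry a variable reference in a field that the
			// typed schema models as an int (illustrative example).
			"other_job": map[string]any{
				"name":                "copy of some_job",
				"max_concurrent_runs": "${databricks_job.some_job.max_concurrent_runs}",
			},
		},
	}
	buf, err := json.MarshalIndent(r, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
}
```

A typed value such as `*schema.ResourceJob` can still be stored in the same map when no references or explicit zero values are needed.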
## Tests

Unit tests.
---
 bundle/deploy/terraform/convert_test.go       | 128 +++----
 .../tf/codegen/templates/data_sources.go.tmpl |   4 +-
 .../tf/codegen/templates/resources.go.tmpl    |   4 +-
 bundle/internal/tf/schema/data_sources.go     | 164 ++++-----
 bundle/internal/tf/schema/resources.go        | 328 +++++++++---------
 5 files changed, 320 insertions(+), 308 deletions(-)

diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go
index bb77f287b..afef37088 100644
--- a/bundle/deploy/terraform/convert_test.go
+++ b/bundle/deploy/terraform/convert_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
+	"github.com/databricks/cli/bundle/internal/tf/schema"
 	"github.com/databricks/databricks-sdk-go/service/catalog"
 	"github.com/databricks/databricks-sdk-go/service/compute"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
@@ -55,12 +56,14 @@ func TestBundleToTerraformJob(t *testing.T) {
 	}

 	out := BundleToTerraform(&config)
-	assert.Equal(t, "my job", out.Resource.Job["my_job"].Name)
-	assert.Len(t, out.Resource.Job["my_job"].JobCluster, 1)
-	assert.Equal(t, "https://github.com/foo/bar", out.Resource.Job["my_job"].GitSource.Url)
-	assert.Len(t, out.Resource.Job["my_job"].Parameter, 2)
-	assert.Equal(t, "param1", out.Resource.Job["my_job"].Parameter[0].Name)
-	assert.Equal(t, "param2", out.Resource.Job["my_job"].Parameter[1].Name)
+	resource := out.Resource.Job["my_job"].(*schema.ResourceJob)
+
+	assert.Equal(t, "my job", resource.Name)
+	assert.Len(t, resource.JobCluster, 1)
+	assert.Equal(t, "https://github.com/foo/bar", resource.GitSource.Url)
+	assert.Len(t, resource.Parameter, 2)
+	assert.Equal(t, "param1", resource.Parameter[0].Name)
+	assert.Equal(t, "param2", resource.Parameter[1].Name)
 	assert.Nil(t, out.Data)
 }

@@ -83,12 +86,12 @@ func TestBundleToTerraformJobPermissions(t *testing.T) {
 	}

 	out := BundleToTerraform(&config)
-	assert.NotEmpty(t, out.Resource.Permissions["job_my_job"].JobId)
-	assert.Len(t, out.Resource.Permissions["job_my_job"].AccessControl, 1)
+	resource := out.Resource.Permissions["job_my_job"].(*schema.ResourcePermissions)

-	p := out.Resource.Permissions["job_my_job"].AccessControl[0]
-	assert.Equal(t, "jane@doe.com", p.UserName)
-	assert.Equal(t, "CAN_VIEW", p.PermissionLevel)
+	assert.NotEmpty(t, resource.JobId)
+	assert.Len(t, resource.AccessControl, 1)
+	assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName)
+	assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel)
 }

 func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
@@ -119,10 +122,12 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) {
 	}

 	out := BundleToTerraform(&config)
-	assert.Equal(t, "my job", out.Resource.Job["my_job"].Name)
-	require.Len(t, out.Resource.Job["my_job"].Task, 1)
-	require.Len(t, out.Resource.Job["my_job"].Task[0].Library, 1)
-	assert.Equal(t, "mlflow", out.Resource.Job["my_job"].Task[0].Library[0].Pypi.Package)
+	resource := out.Resource.Job["my_job"].(*schema.ResourceJob)
+
+	assert.Equal(t, "my job", resource.Name)
+	require.Len(t, resource.Task, 1)
+	require.Len(t, resource.Task[0].Library, 1)
+	assert.Equal(t, "mlflow", resource.Task[0].Library[0].Pypi.Package)
 }

 func TestBundleToTerraformPipeline(t *testing.T) {
@@ -173,14
+178,15 @@ func TestBundleToTerraformPipeline(t *testing.T) { } out := BundleToTerraform(&config) - assert.Equal(t, "my pipeline", out.Resource.Pipeline["my_pipeline"].Name) - assert.Len(t, out.Resource.Pipeline["my_pipeline"].Library, 2) - notifs := out.Resource.Pipeline["my_pipeline"].Notification - assert.Len(t, notifs, 2) - assert.Equal(t, notifs[0].Alerts, []string{"on-update-fatal-failure"}) - assert.Equal(t, notifs[0].EmailRecipients, []string{"jane@doe.com"}) - assert.Equal(t, notifs[1].Alerts, []string{"on-update-failure", "on-flow-failure"}) - assert.Equal(t, notifs[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"}) + resource := out.Resource.Pipeline["my_pipeline"].(*schema.ResourcePipeline) + + assert.Equal(t, "my pipeline", resource.Name) + assert.Len(t, resource.Library, 2) + assert.Len(t, resource.Notification, 2) + assert.Equal(t, resource.Notification[0].Alerts, []string{"on-update-fatal-failure"}) + assert.Equal(t, resource.Notification[0].EmailRecipients, []string{"jane@doe.com"}) + assert.Equal(t, resource.Notification[1].Alerts, []string{"on-update-failure", "on-flow-failure"}) + assert.Equal(t, resource.Notification[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"}) assert.Nil(t, out.Data) } @@ -203,12 +209,12 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["pipeline_my_pipeline"].PipelineId) - assert.Len(t, out.Resource.Permissions["pipeline_my_pipeline"].AccessControl, 1) + resource := out.Resource.Permissions["pipeline_my_pipeline"].(*schema.ResourcePermissions) - p := out.Resource.Permissions["pipeline_my_pipeline"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_VIEW", p.PermissionLevel) + assert.NotEmpty(t, resource.PipelineId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) } func TestBundleToTerraformModel(t *testing.T) { @@ -238,13 +244,15 @@ func TestBundleToTerraformModel(t *testing.T) { } out := BundleToTerraform(&config) - assert.Equal(t, "name", out.Resource.MlflowModel["my_model"].Name) - assert.Equal(t, "description", out.Resource.MlflowModel["my_model"].Description) - assert.Len(t, out.Resource.MlflowModel["my_model"].Tags, 2) - assert.Equal(t, "k1", out.Resource.MlflowModel["my_model"].Tags[0].Key) - assert.Equal(t, "v1", out.Resource.MlflowModel["my_model"].Tags[0].Value) - assert.Equal(t, "k2", out.Resource.MlflowModel["my_model"].Tags[1].Key) - assert.Equal(t, "v2", out.Resource.MlflowModel["my_model"].Tags[1].Value) + resource := out.Resource.MlflowModel["my_model"].(*schema.ResourceMlflowModel) + + assert.Equal(t, "name", resource.Name) + assert.Equal(t, "description", resource.Description) + assert.Len(t, resource.Tags, 2) + assert.Equal(t, "k1", resource.Tags[0].Key) + assert.Equal(t, "v1", resource.Tags[0].Value) + assert.Equal(t, "k2", resource.Tags[1].Key) + assert.Equal(t, "v2", resource.Tags[1].Value) assert.Nil(t, out.Data) } @@ -267,12 +275,12 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["mlflow_model_my_model"].RegisteredModelId) - assert.Len(t, out.Resource.Permissions["mlflow_model_my_model"].AccessControl, 1) + resource := out.Resource.Permissions["mlflow_model_my_model"].(*schema.ResourcePermissions) - p := 
out.Resource.Permissions["mlflow_model_my_model"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_READ", p.PermissionLevel) + assert.NotEmpty(t, resource.RegisteredModelId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel) } func TestBundleToTerraformExperiment(t *testing.T) { @@ -291,7 +299,9 @@ func TestBundleToTerraformExperiment(t *testing.T) { } out := BundleToTerraform(&config) - assert.Equal(t, "name", out.Resource.MlflowExperiment["my_experiment"].Name) + resource := out.Resource.MlflowExperiment["my_experiment"].(*schema.ResourceMlflowExperiment) + + assert.Equal(t, "name", resource.Name) assert.Nil(t, out.Data) } @@ -314,12 +324,12 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["mlflow_experiment_my_experiment"].ExperimentId) - assert.Len(t, out.Resource.Permissions["mlflow_experiment_my_experiment"].AccessControl, 1) + resource := out.Resource.Permissions["mlflow_experiment_my_experiment"].(*schema.ResourcePermissions) - p := out.Resource.Permissions["mlflow_experiment_my_experiment"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_READ", p.PermissionLevel) + assert.NotEmpty(t, resource.ExperimentId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel) } @@ -357,7 +367,8 @@ func TestBundleToTerraformModelServing(t *testing.T) { } out := BundleToTerraform(&config) - resource := out.Resource.ModelServing["my_model_serving_endpoint"] + resource := out.Resource.ModelServing["my_model_serving_endpoint"].(*schema.ResourceModelServing) + assert.Equal(t, "name", resource.Name) assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName) assert.Equal(t, "1", resource.Config.ServedModels[0].ModelVersion) @@ -387,12 +398,12 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["model_serving_my_model_serving_endpoint"].ServingEndpointId) - assert.Len(t, out.Resource.Permissions["model_serving_my_model_serving_endpoint"].AccessControl, 1) + resource := out.Resource.Permissions["model_serving_my_model_serving_endpoint"].(*schema.ResourcePermissions) - p := out.Resource.Permissions["model_serving_my_model_serving_endpoint"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_VIEW", p.PermissionLevel) + assert.NotEmpty(t, resource.ServingEndpointId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) } @@ -415,7 +426,8 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { } out := BundleToTerraform(&config) - resource := out.Resource.RegisteredModel["my_registered_model"] + resource := out.Resource.RegisteredModel["my_registered_model"].(*schema.ResourceRegisteredModel) + assert.Equal(t, "name", resource.Name) assert.Equal(t, "catalog", resource.CatalogName) assert.Equal(t, "schema", resource.SchemaName) @@ -442,12 +454,12 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, 
out.Resource.Grants["registered_model_my_registered_model"].Function) - assert.Len(t, out.Resource.Grants["registered_model_my_registered_model"].Grant, 1) + resource := out.Resource.Grants["registered_model_my_registered_model"].(*schema.ResourceGrants) - p := out.Resource.Grants["registered_model_my_registered_model"].Grant[0] - assert.Equal(t, "jane@doe.com", p.Principal) - assert.Equal(t, "EXECUTE", p.Privileges[0]) + assert.NotEmpty(t, resource.Function) + assert.Len(t, resource.Grant, 1) + assert.Equal(t, "jane@doe.com", resource.Grant[0].Principal) + assert.Equal(t, "EXECUTE", resource.Grant[0].Privileges[0]) } func TestTerraformToBundleEmptyLocalResources(t *testing.T) { diff --git a/bundle/internal/tf/codegen/templates/data_sources.go.tmpl b/bundle/internal/tf/codegen/templates/data_sources.go.tmpl index 21baf33ea..9d998d497 100644 --- a/bundle/internal/tf/codegen/templates/data_sources.go.tmpl +++ b/bundle/internal/tf/codegen/templates/data_sources.go.tmpl @@ -4,14 +4,14 @@ package schema type DataSources struct { {{- range .Blocks }} - {{ .FieldName }} map[string]*{{ .TypeName }} `json:"{{ .TerraformName }},omitempty"` + {{ .FieldName }} map[string]any `json:"{{ .TerraformName }},omitempty"` {{- end }} } func NewDataSources() *DataSources { return &DataSources{ {{- range .Blocks }} - {{ .FieldName }}: make(map[string]*{{ .TypeName }}), + {{ .FieldName }}: make(map[string]any), {{- end }} } } diff --git a/bundle/internal/tf/codegen/templates/resources.go.tmpl b/bundle/internal/tf/codegen/templates/resources.go.tmpl index d18151871..91407a00e 100644 --- a/bundle/internal/tf/codegen/templates/resources.go.tmpl +++ b/bundle/internal/tf/codegen/templates/resources.go.tmpl @@ -4,14 +4,14 @@ package schema type Resources struct { {{- range .Blocks }} - {{ .FieldName }} map[string]*{{ .TypeName }} `json:"{{ .TerraformName }},omitempty"` + {{ .FieldName }} map[string]any `json:"{{ .TerraformName }},omitempty"` {{- end }} } func NewResources() *Resources { return &Resources{ {{- range .Blocks }} - {{ .FieldName }}: make(map[string]*{{ .TypeName }}), + {{ .FieldName }}: make(map[string]any), {{- end }} } } diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 7c48a8471..a88fa2e2e 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -3,91 +3,91 @@ package schema type DataSources struct { - AwsAssumeRolePolicy map[string]*DataSourceAwsAssumeRolePolicy `json:"databricks_aws_assume_role_policy,omitempty"` - AwsBucketPolicy map[string]*DataSourceAwsBucketPolicy `json:"databricks_aws_bucket_policy,omitempty"` - AwsCrossaccountPolicy map[string]*DataSourceAwsCrossaccountPolicy `json:"databricks_aws_crossaccount_policy,omitempty"` - AwsUnityCatalogPolicy map[string]*DataSourceAwsUnityCatalogPolicy `json:"databricks_aws_unity_catalog_policy,omitempty"` - Catalogs map[string]*DataSourceCatalogs `json:"databricks_catalogs,omitempty"` - Cluster map[string]*DataSourceCluster `json:"databricks_cluster,omitempty"` - ClusterPolicy map[string]*DataSourceClusterPolicy `json:"databricks_cluster_policy,omitempty"` - Clusters map[string]*DataSourceClusters `json:"databricks_clusters,omitempty"` - CurrentConfig map[string]*DataSourceCurrentConfig `json:"databricks_current_config,omitempty"` - CurrentMetastore map[string]*DataSourceCurrentMetastore `json:"databricks_current_metastore,omitempty"` - CurrentUser map[string]*DataSourceCurrentUser `json:"databricks_current_user,omitempty"` - DbfsFile 
map[string]*DataSourceDbfsFile `json:"databricks_dbfs_file,omitempty"` - DbfsFilePaths map[string]*DataSourceDbfsFilePaths `json:"databricks_dbfs_file_paths,omitempty"` - Directory map[string]*DataSourceDirectory `json:"databricks_directory,omitempty"` - Group map[string]*DataSourceGroup `json:"databricks_group,omitempty"` - InstancePool map[string]*DataSourceInstancePool `json:"databricks_instance_pool,omitempty"` - InstanceProfiles map[string]*DataSourceInstanceProfiles `json:"databricks_instance_profiles,omitempty"` - Job map[string]*DataSourceJob `json:"databricks_job,omitempty"` - Jobs map[string]*DataSourceJobs `json:"databricks_jobs,omitempty"` - Metastore map[string]*DataSourceMetastore `json:"databricks_metastore,omitempty"` - Metastores map[string]*DataSourceMetastores `json:"databricks_metastores,omitempty"` - MlflowModel map[string]*DataSourceMlflowModel `json:"databricks_mlflow_model,omitempty"` - MwsCredentials map[string]*DataSourceMwsCredentials `json:"databricks_mws_credentials,omitempty"` - MwsWorkspaces map[string]*DataSourceMwsWorkspaces `json:"databricks_mws_workspaces,omitempty"` - NodeType map[string]*DataSourceNodeType `json:"databricks_node_type,omitempty"` - Notebook map[string]*DataSourceNotebook `json:"databricks_notebook,omitempty"` - NotebookPaths map[string]*DataSourceNotebookPaths `json:"databricks_notebook_paths,omitempty"` - Pipelines map[string]*DataSourcePipelines `json:"databricks_pipelines,omitempty"` - Schemas map[string]*DataSourceSchemas `json:"databricks_schemas,omitempty"` - ServicePrincipal map[string]*DataSourceServicePrincipal `json:"databricks_service_principal,omitempty"` - ServicePrincipals map[string]*DataSourceServicePrincipals `json:"databricks_service_principals,omitempty"` - Share map[string]*DataSourceShare `json:"databricks_share,omitempty"` - Shares map[string]*DataSourceShares `json:"databricks_shares,omitempty"` - SparkVersion map[string]*DataSourceSparkVersion `json:"databricks_spark_version,omitempty"` - SqlWarehouse map[string]*DataSourceSqlWarehouse `json:"databricks_sql_warehouse,omitempty"` - SqlWarehouses map[string]*DataSourceSqlWarehouses `json:"databricks_sql_warehouses,omitempty"` - Tables map[string]*DataSourceTables `json:"databricks_tables,omitempty"` - User map[string]*DataSourceUser `json:"databricks_user,omitempty"` - Views map[string]*DataSourceViews `json:"databricks_views,omitempty"` - Volumes map[string]*DataSourceVolumes `json:"databricks_volumes,omitempty"` - Zones map[string]*DataSourceZones `json:"databricks_zones,omitempty"` + AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` + AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` + AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` + AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"` + Catalogs map[string]any `json:"databricks_catalogs,omitempty"` + Cluster map[string]any `json:"databricks_cluster,omitempty"` + ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` + Clusters map[string]any `json:"databricks_clusters,omitempty"` + CurrentConfig map[string]any `json:"databricks_current_config,omitempty"` + CurrentMetastore map[string]any `json:"databricks_current_metastore,omitempty"` + CurrentUser map[string]any `json:"databricks_current_user,omitempty"` + DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` + DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"` 
+ Directory map[string]any `json:"databricks_directory,omitempty"` + Group map[string]any `json:"databricks_group,omitempty"` + InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` + InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` + Job map[string]any `json:"databricks_job,omitempty"` + Jobs map[string]any `json:"databricks_jobs,omitempty"` + Metastore map[string]any `json:"databricks_metastore,omitempty"` + Metastores map[string]any `json:"databricks_metastores,omitempty"` + MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` + MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` + NodeType map[string]any `json:"databricks_node_type,omitempty"` + Notebook map[string]any `json:"databricks_notebook,omitempty"` + NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"` + Pipelines map[string]any `json:"databricks_pipelines,omitempty"` + Schemas map[string]any `json:"databricks_schemas,omitempty"` + ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` + ServicePrincipals map[string]any `json:"databricks_service_principals,omitempty"` + Share map[string]any `json:"databricks_share,omitempty"` + Shares map[string]any `json:"databricks_shares,omitempty"` + SparkVersion map[string]any `json:"databricks_spark_version,omitempty"` + SqlWarehouse map[string]any `json:"databricks_sql_warehouse,omitempty"` + SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"` + Tables map[string]any `json:"databricks_tables,omitempty"` + User map[string]any `json:"databricks_user,omitempty"` + Views map[string]any `json:"databricks_views,omitempty"` + Volumes map[string]any `json:"databricks_volumes,omitempty"` + Zones map[string]any `json:"databricks_zones,omitempty"` } func NewDataSources() *DataSources { return &DataSources{ - AwsAssumeRolePolicy: make(map[string]*DataSourceAwsAssumeRolePolicy), - AwsBucketPolicy: make(map[string]*DataSourceAwsBucketPolicy), - AwsCrossaccountPolicy: make(map[string]*DataSourceAwsCrossaccountPolicy), - AwsUnityCatalogPolicy: make(map[string]*DataSourceAwsUnityCatalogPolicy), - Catalogs: make(map[string]*DataSourceCatalogs), - Cluster: make(map[string]*DataSourceCluster), - ClusterPolicy: make(map[string]*DataSourceClusterPolicy), - Clusters: make(map[string]*DataSourceClusters), - CurrentConfig: make(map[string]*DataSourceCurrentConfig), - CurrentMetastore: make(map[string]*DataSourceCurrentMetastore), - CurrentUser: make(map[string]*DataSourceCurrentUser), - DbfsFile: make(map[string]*DataSourceDbfsFile), - DbfsFilePaths: make(map[string]*DataSourceDbfsFilePaths), - Directory: make(map[string]*DataSourceDirectory), - Group: make(map[string]*DataSourceGroup), - InstancePool: make(map[string]*DataSourceInstancePool), - InstanceProfiles: make(map[string]*DataSourceInstanceProfiles), - Job: make(map[string]*DataSourceJob), - Jobs: make(map[string]*DataSourceJobs), - Metastore: make(map[string]*DataSourceMetastore), - Metastores: make(map[string]*DataSourceMetastores), - MlflowModel: make(map[string]*DataSourceMlflowModel), - MwsCredentials: make(map[string]*DataSourceMwsCredentials), - MwsWorkspaces: make(map[string]*DataSourceMwsWorkspaces), - NodeType: make(map[string]*DataSourceNodeType), - Notebook: make(map[string]*DataSourceNotebook), - NotebookPaths: make(map[string]*DataSourceNotebookPaths), - Pipelines: make(map[string]*DataSourcePipelines), - 
Schemas: make(map[string]*DataSourceSchemas), - ServicePrincipal: make(map[string]*DataSourceServicePrincipal), - ServicePrincipals: make(map[string]*DataSourceServicePrincipals), - Share: make(map[string]*DataSourceShare), - Shares: make(map[string]*DataSourceShares), - SparkVersion: make(map[string]*DataSourceSparkVersion), - SqlWarehouse: make(map[string]*DataSourceSqlWarehouse), - SqlWarehouses: make(map[string]*DataSourceSqlWarehouses), - Tables: make(map[string]*DataSourceTables), - User: make(map[string]*DataSourceUser), - Views: make(map[string]*DataSourceViews), - Volumes: make(map[string]*DataSourceVolumes), - Zones: make(map[string]*DataSourceZones), + AwsAssumeRolePolicy: make(map[string]any), + AwsBucketPolicy: make(map[string]any), + AwsCrossaccountPolicy: make(map[string]any), + AwsUnityCatalogPolicy: make(map[string]any), + Catalogs: make(map[string]any), + Cluster: make(map[string]any), + ClusterPolicy: make(map[string]any), + Clusters: make(map[string]any), + CurrentConfig: make(map[string]any), + CurrentMetastore: make(map[string]any), + CurrentUser: make(map[string]any), + DbfsFile: make(map[string]any), + DbfsFilePaths: make(map[string]any), + Directory: make(map[string]any), + Group: make(map[string]any), + InstancePool: make(map[string]any), + InstanceProfiles: make(map[string]any), + Job: make(map[string]any), + Jobs: make(map[string]any), + Metastore: make(map[string]any), + Metastores: make(map[string]any), + MlflowModel: make(map[string]any), + MwsCredentials: make(map[string]any), + MwsWorkspaces: make(map[string]any), + NodeType: make(map[string]any), + Notebook: make(map[string]any), + NotebookPaths: make(map[string]any), + Pipelines: make(map[string]any), + Schemas: make(map[string]any), + ServicePrincipal: make(map[string]any), + ServicePrincipals: make(map[string]any), + Share: make(map[string]any), + Shares: make(map[string]any), + SparkVersion: make(map[string]any), + SqlWarehouse: make(map[string]any), + SqlWarehouses: make(map[string]any), + Tables: make(map[string]any), + User: make(map[string]any), + Views: make(map[string]any), + Volumes: make(map[string]any), + Zones: make(map[string]any), } } diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 0a468ba92..57f11d4b4 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -3,173 +3,173 @@ package schema type Resources struct { - AccessControlRuleSet map[string]*ResourceAccessControlRuleSet `json:"databricks_access_control_rule_set,omitempty"` - ArtifactAllowlist map[string]*ResourceArtifactAllowlist `json:"databricks_artifact_allowlist,omitempty"` - AwsS3Mount map[string]*ResourceAwsS3Mount `json:"databricks_aws_s3_mount,omitempty"` - AzureAdlsGen1Mount map[string]*ResourceAzureAdlsGen1Mount `json:"databricks_azure_adls_gen1_mount,omitempty"` - AzureAdlsGen2Mount map[string]*ResourceAzureAdlsGen2Mount `json:"databricks_azure_adls_gen2_mount,omitempty"` - AzureBlobMount map[string]*ResourceAzureBlobMount `json:"databricks_azure_blob_mount,omitempty"` - Catalog map[string]*ResourceCatalog `json:"databricks_catalog,omitempty"` - CatalogWorkspaceBinding map[string]*ResourceCatalogWorkspaceBinding `json:"databricks_catalog_workspace_binding,omitempty"` - Cluster map[string]*ResourceCluster `json:"databricks_cluster,omitempty"` - ClusterPolicy map[string]*ResourceClusterPolicy `json:"databricks_cluster_policy,omitempty"` - Connection map[string]*ResourceConnection `json:"databricks_connection,omitempty"` - DbfsFile 
map[string]*ResourceDbfsFile `json:"databricks_dbfs_file,omitempty"` - DefaultNamespaceSetting map[string]*ResourceDefaultNamespaceSetting `json:"databricks_default_namespace_setting,omitempty"` - Directory map[string]*ResourceDirectory `json:"databricks_directory,omitempty"` - Entitlements map[string]*ResourceEntitlements `json:"databricks_entitlements,omitempty"` - ExternalLocation map[string]*ResourceExternalLocation `json:"databricks_external_location,omitempty"` - GitCredential map[string]*ResourceGitCredential `json:"databricks_git_credential,omitempty"` - GlobalInitScript map[string]*ResourceGlobalInitScript `json:"databricks_global_init_script,omitempty"` - Grant map[string]*ResourceGrant `json:"databricks_grant,omitempty"` - Grants map[string]*ResourceGrants `json:"databricks_grants,omitempty"` - Group map[string]*ResourceGroup `json:"databricks_group,omitempty"` - GroupInstanceProfile map[string]*ResourceGroupInstanceProfile `json:"databricks_group_instance_profile,omitempty"` - GroupMember map[string]*ResourceGroupMember `json:"databricks_group_member,omitempty"` - GroupRole map[string]*ResourceGroupRole `json:"databricks_group_role,omitempty"` - InstancePool map[string]*ResourceInstancePool `json:"databricks_instance_pool,omitempty"` - InstanceProfile map[string]*ResourceInstanceProfile `json:"databricks_instance_profile,omitempty"` - IpAccessList map[string]*ResourceIpAccessList `json:"databricks_ip_access_list,omitempty"` - Job map[string]*ResourceJob `json:"databricks_job,omitempty"` - Library map[string]*ResourceLibrary `json:"databricks_library,omitempty"` - Metastore map[string]*ResourceMetastore `json:"databricks_metastore,omitempty"` - MetastoreAssignment map[string]*ResourceMetastoreAssignment `json:"databricks_metastore_assignment,omitempty"` - MetastoreDataAccess map[string]*ResourceMetastoreDataAccess `json:"databricks_metastore_data_access,omitempty"` - MlflowExperiment map[string]*ResourceMlflowExperiment `json:"databricks_mlflow_experiment,omitempty"` - MlflowModel map[string]*ResourceMlflowModel `json:"databricks_mlflow_model,omitempty"` - MlflowWebhook map[string]*ResourceMlflowWebhook `json:"databricks_mlflow_webhook,omitempty"` - ModelServing map[string]*ResourceModelServing `json:"databricks_model_serving,omitempty"` - Mount map[string]*ResourceMount `json:"databricks_mount,omitempty"` - MwsCredentials map[string]*ResourceMwsCredentials `json:"databricks_mws_credentials,omitempty"` - MwsCustomerManagedKeys map[string]*ResourceMwsCustomerManagedKeys `json:"databricks_mws_customer_managed_keys,omitempty"` - MwsLogDelivery map[string]*ResourceMwsLogDelivery `json:"databricks_mws_log_delivery,omitempty"` - MwsNetworks map[string]*ResourceMwsNetworks `json:"databricks_mws_networks,omitempty"` - MwsPermissionAssignment map[string]*ResourceMwsPermissionAssignment `json:"databricks_mws_permission_assignment,omitempty"` - MwsPrivateAccessSettings map[string]*ResourceMwsPrivateAccessSettings `json:"databricks_mws_private_access_settings,omitempty"` - MwsStorageConfigurations map[string]*ResourceMwsStorageConfigurations `json:"databricks_mws_storage_configurations,omitempty"` - MwsVpcEndpoint map[string]*ResourceMwsVpcEndpoint `json:"databricks_mws_vpc_endpoint,omitempty"` - MwsWorkspaces map[string]*ResourceMwsWorkspaces `json:"databricks_mws_workspaces,omitempty"` - Notebook map[string]*ResourceNotebook `json:"databricks_notebook,omitempty"` - OboToken map[string]*ResourceOboToken `json:"databricks_obo_token,omitempty"` - PermissionAssignment 
map[string]*ResourcePermissionAssignment `json:"databricks_permission_assignment,omitempty"` - Permissions map[string]*ResourcePermissions `json:"databricks_permissions,omitempty"` - Pipeline map[string]*ResourcePipeline `json:"databricks_pipeline,omitempty"` - Provider map[string]*ResourceProvider `json:"databricks_provider,omitempty"` - Recipient map[string]*ResourceRecipient `json:"databricks_recipient,omitempty"` - RegisteredModel map[string]*ResourceRegisteredModel `json:"databricks_registered_model,omitempty"` - Repo map[string]*ResourceRepo `json:"databricks_repo,omitempty"` - Schema map[string]*ResourceSchema `json:"databricks_schema,omitempty"` - Secret map[string]*ResourceSecret `json:"databricks_secret,omitempty"` - SecretAcl map[string]*ResourceSecretAcl `json:"databricks_secret_acl,omitempty"` - SecretScope map[string]*ResourceSecretScope `json:"databricks_secret_scope,omitempty"` - ServicePrincipal map[string]*ResourceServicePrincipal `json:"databricks_service_principal,omitempty"` - ServicePrincipalRole map[string]*ResourceServicePrincipalRole `json:"databricks_service_principal_role,omitempty"` - ServicePrincipalSecret map[string]*ResourceServicePrincipalSecret `json:"databricks_service_principal_secret,omitempty"` - Share map[string]*ResourceShare `json:"databricks_share,omitempty"` - SqlAlert map[string]*ResourceSqlAlert `json:"databricks_sql_alert,omitempty"` - SqlDashboard map[string]*ResourceSqlDashboard `json:"databricks_sql_dashboard,omitempty"` - SqlEndpoint map[string]*ResourceSqlEndpoint `json:"databricks_sql_endpoint,omitempty"` - SqlGlobalConfig map[string]*ResourceSqlGlobalConfig `json:"databricks_sql_global_config,omitempty"` - SqlPermissions map[string]*ResourceSqlPermissions `json:"databricks_sql_permissions,omitempty"` - SqlQuery map[string]*ResourceSqlQuery `json:"databricks_sql_query,omitempty"` - SqlTable map[string]*ResourceSqlTable `json:"databricks_sql_table,omitempty"` - SqlVisualization map[string]*ResourceSqlVisualization `json:"databricks_sql_visualization,omitempty"` - SqlWidget map[string]*ResourceSqlWidget `json:"databricks_sql_widget,omitempty"` - StorageCredential map[string]*ResourceStorageCredential `json:"databricks_storage_credential,omitempty"` - SystemSchema map[string]*ResourceSystemSchema `json:"databricks_system_schema,omitempty"` - Table map[string]*ResourceTable `json:"databricks_table,omitempty"` - Token map[string]*ResourceToken `json:"databricks_token,omitempty"` - User map[string]*ResourceUser `json:"databricks_user,omitempty"` - UserInstanceProfile map[string]*ResourceUserInstanceProfile `json:"databricks_user_instance_profile,omitempty"` - UserRole map[string]*ResourceUserRole `json:"databricks_user_role,omitempty"` - Volume map[string]*ResourceVolume `json:"databricks_volume,omitempty"` - WorkspaceConf map[string]*ResourceWorkspaceConf `json:"databricks_workspace_conf,omitempty"` - WorkspaceFile map[string]*ResourceWorkspaceFile `json:"databricks_workspace_file,omitempty"` + AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` + ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` + AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` + AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"` + AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"` + AzureBlobMount map[string]any `json:"databricks_azure_blob_mount,omitempty"` + Catalog map[string]any 
`json:"databricks_catalog,omitempty"` + CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"` + Cluster map[string]any `json:"databricks_cluster,omitempty"` + ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` + Connection map[string]any `json:"databricks_connection,omitempty"` + DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` + DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"` + Directory map[string]any `json:"databricks_directory,omitempty"` + Entitlements map[string]any `json:"databricks_entitlements,omitempty"` + ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + GitCredential map[string]any `json:"databricks_git_credential,omitempty"` + GlobalInitScript map[string]any `json:"databricks_global_init_script,omitempty"` + Grant map[string]any `json:"databricks_grant,omitempty"` + Grants map[string]any `json:"databricks_grants,omitempty"` + Group map[string]any `json:"databricks_group,omitempty"` + GroupInstanceProfile map[string]any `json:"databricks_group_instance_profile,omitempty"` + GroupMember map[string]any `json:"databricks_group_member,omitempty"` + GroupRole map[string]any `json:"databricks_group_role,omitempty"` + InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` + InstanceProfile map[string]any `json:"databricks_instance_profile,omitempty"` + IpAccessList map[string]any `json:"databricks_ip_access_list,omitempty"` + Job map[string]any `json:"databricks_job,omitempty"` + Library map[string]any `json:"databricks_library,omitempty"` + Metastore map[string]any `json:"databricks_metastore,omitempty"` + MetastoreAssignment map[string]any `json:"databricks_metastore_assignment,omitempty"` + MetastoreDataAccess map[string]any `json:"databricks_metastore_data_access,omitempty"` + MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` + MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` + MlflowWebhook map[string]any `json:"databricks_mlflow_webhook,omitempty"` + ModelServing map[string]any `json:"databricks_model_serving,omitempty"` + Mount map[string]any `json:"databricks_mount,omitempty"` + MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"` + MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"` + MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"` + MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"` + MwsPrivateAccessSettings map[string]any `json:"databricks_mws_private_access_settings,omitempty"` + MwsStorageConfigurations map[string]any `json:"databricks_mws_storage_configurations,omitempty"` + MwsVpcEndpoint map[string]any `json:"databricks_mws_vpc_endpoint,omitempty"` + MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` + Notebook map[string]any `json:"databricks_notebook,omitempty"` + OboToken map[string]any `json:"databricks_obo_token,omitempty"` + PermissionAssignment map[string]any `json:"databricks_permission_assignment,omitempty"` + Permissions map[string]any `json:"databricks_permissions,omitempty"` + Pipeline map[string]any `json:"databricks_pipeline,omitempty"` + Provider map[string]any `json:"databricks_provider,omitempty"` + Recipient map[string]any `json:"databricks_recipient,omitempty"` + RegisteredModel map[string]any 
`json:"databricks_registered_model,omitempty"` + Repo map[string]any `json:"databricks_repo,omitempty"` + Schema map[string]any `json:"databricks_schema,omitempty"` + Secret map[string]any `json:"databricks_secret,omitempty"` + SecretAcl map[string]any `json:"databricks_secret_acl,omitempty"` + SecretScope map[string]any `json:"databricks_secret_scope,omitempty"` + ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` + ServicePrincipalRole map[string]any `json:"databricks_service_principal_role,omitempty"` + ServicePrincipalSecret map[string]any `json:"databricks_service_principal_secret,omitempty"` + Share map[string]any `json:"databricks_share,omitempty"` + SqlAlert map[string]any `json:"databricks_sql_alert,omitempty"` + SqlDashboard map[string]any `json:"databricks_sql_dashboard,omitempty"` + SqlEndpoint map[string]any `json:"databricks_sql_endpoint,omitempty"` + SqlGlobalConfig map[string]any `json:"databricks_sql_global_config,omitempty"` + SqlPermissions map[string]any `json:"databricks_sql_permissions,omitempty"` + SqlQuery map[string]any `json:"databricks_sql_query,omitempty"` + SqlTable map[string]any `json:"databricks_sql_table,omitempty"` + SqlVisualization map[string]any `json:"databricks_sql_visualization,omitempty"` + SqlWidget map[string]any `json:"databricks_sql_widget,omitempty"` + StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` + SystemSchema map[string]any `json:"databricks_system_schema,omitempty"` + Table map[string]any `json:"databricks_table,omitempty"` + Token map[string]any `json:"databricks_token,omitempty"` + User map[string]any `json:"databricks_user,omitempty"` + UserInstanceProfile map[string]any `json:"databricks_user_instance_profile,omitempty"` + UserRole map[string]any `json:"databricks_user_role,omitempty"` + Volume map[string]any `json:"databricks_volume,omitempty"` + WorkspaceConf map[string]any `json:"databricks_workspace_conf,omitempty"` + WorkspaceFile map[string]any `json:"databricks_workspace_file,omitempty"` } func NewResources() *Resources { return &Resources{ - AccessControlRuleSet: make(map[string]*ResourceAccessControlRuleSet), - ArtifactAllowlist: make(map[string]*ResourceArtifactAllowlist), - AwsS3Mount: make(map[string]*ResourceAwsS3Mount), - AzureAdlsGen1Mount: make(map[string]*ResourceAzureAdlsGen1Mount), - AzureAdlsGen2Mount: make(map[string]*ResourceAzureAdlsGen2Mount), - AzureBlobMount: make(map[string]*ResourceAzureBlobMount), - Catalog: make(map[string]*ResourceCatalog), - CatalogWorkspaceBinding: make(map[string]*ResourceCatalogWorkspaceBinding), - Cluster: make(map[string]*ResourceCluster), - ClusterPolicy: make(map[string]*ResourceClusterPolicy), - Connection: make(map[string]*ResourceConnection), - DbfsFile: make(map[string]*ResourceDbfsFile), - DefaultNamespaceSetting: make(map[string]*ResourceDefaultNamespaceSetting), - Directory: make(map[string]*ResourceDirectory), - Entitlements: make(map[string]*ResourceEntitlements), - ExternalLocation: make(map[string]*ResourceExternalLocation), - GitCredential: make(map[string]*ResourceGitCredential), - GlobalInitScript: make(map[string]*ResourceGlobalInitScript), - Grant: make(map[string]*ResourceGrant), - Grants: make(map[string]*ResourceGrants), - Group: make(map[string]*ResourceGroup), - GroupInstanceProfile: make(map[string]*ResourceGroupInstanceProfile), - GroupMember: make(map[string]*ResourceGroupMember), - GroupRole: make(map[string]*ResourceGroupRole), - InstancePool: make(map[string]*ResourceInstancePool), - 
InstanceProfile: make(map[string]*ResourceInstanceProfile), - IpAccessList: make(map[string]*ResourceIpAccessList), - Job: make(map[string]*ResourceJob), - Library: make(map[string]*ResourceLibrary), - Metastore: make(map[string]*ResourceMetastore), - MetastoreAssignment: make(map[string]*ResourceMetastoreAssignment), - MetastoreDataAccess: make(map[string]*ResourceMetastoreDataAccess), - MlflowExperiment: make(map[string]*ResourceMlflowExperiment), - MlflowModel: make(map[string]*ResourceMlflowModel), - MlflowWebhook: make(map[string]*ResourceMlflowWebhook), - ModelServing: make(map[string]*ResourceModelServing), - Mount: make(map[string]*ResourceMount), - MwsCredentials: make(map[string]*ResourceMwsCredentials), - MwsCustomerManagedKeys: make(map[string]*ResourceMwsCustomerManagedKeys), - MwsLogDelivery: make(map[string]*ResourceMwsLogDelivery), - MwsNetworks: make(map[string]*ResourceMwsNetworks), - MwsPermissionAssignment: make(map[string]*ResourceMwsPermissionAssignment), - MwsPrivateAccessSettings: make(map[string]*ResourceMwsPrivateAccessSettings), - MwsStorageConfigurations: make(map[string]*ResourceMwsStorageConfigurations), - MwsVpcEndpoint: make(map[string]*ResourceMwsVpcEndpoint), - MwsWorkspaces: make(map[string]*ResourceMwsWorkspaces), - Notebook: make(map[string]*ResourceNotebook), - OboToken: make(map[string]*ResourceOboToken), - PermissionAssignment: make(map[string]*ResourcePermissionAssignment), - Permissions: make(map[string]*ResourcePermissions), - Pipeline: make(map[string]*ResourcePipeline), - Provider: make(map[string]*ResourceProvider), - Recipient: make(map[string]*ResourceRecipient), - RegisteredModel: make(map[string]*ResourceRegisteredModel), - Repo: make(map[string]*ResourceRepo), - Schema: make(map[string]*ResourceSchema), - Secret: make(map[string]*ResourceSecret), - SecretAcl: make(map[string]*ResourceSecretAcl), - SecretScope: make(map[string]*ResourceSecretScope), - ServicePrincipal: make(map[string]*ResourceServicePrincipal), - ServicePrincipalRole: make(map[string]*ResourceServicePrincipalRole), - ServicePrincipalSecret: make(map[string]*ResourceServicePrincipalSecret), - Share: make(map[string]*ResourceShare), - SqlAlert: make(map[string]*ResourceSqlAlert), - SqlDashboard: make(map[string]*ResourceSqlDashboard), - SqlEndpoint: make(map[string]*ResourceSqlEndpoint), - SqlGlobalConfig: make(map[string]*ResourceSqlGlobalConfig), - SqlPermissions: make(map[string]*ResourceSqlPermissions), - SqlQuery: make(map[string]*ResourceSqlQuery), - SqlTable: make(map[string]*ResourceSqlTable), - SqlVisualization: make(map[string]*ResourceSqlVisualization), - SqlWidget: make(map[string]*ResourceSqlWidget), - StorageCredential: make(map[string]*ResourceStorageCredential), - SystemSchema: make(map[string]*ResourceSystemSchema), - Table: make(map[string]*ResourceTable), - Token: make(map[string]*ResourceToken), - User: make(map[string]*ResourceUser), - UserInstanceProfile: make(map[string]*ResourceUserInstanceProfile), - UserRole: make(map[string]*ResourceUserRole), - Volume: make(map[string]*ResourceVolume), - WorkspaceConf: make(map[string]*ResourceWorkspaceConf), - WorkspaceFile: make(map[string]*ResourceWorkspaceFile), + AccessControlRuleSet: make(map[string]any), + ArtifactAllowlist: make(map[string]any), + AwsS3Mount: make(map[string]any), + AzureAdlsGen1Mount: make(map[string]any), + AzureAdlsGen2Mount: make(map[string]any), + AzureBlobMount: make(map[string]any), + Catalog: make(map[string]any), + CatalogWorkspaceBinding: make(map[string]any), + Cluster: 
make(map[string]any), + ClusterPolicy: make(map[string]any), + Connection: make(map[string]any), + DbfsFile: make(map[string]any), + DefaultNamespaceSetting: make(map[string]any), + Directory: make(map[string]any), + Entitlements: make(map[string]any), + ExternalLocation: make(map[string]any), + GitCredential: make(map[string]any), + GlobalInitScript: make(map[string]any), + Grant: make(map[string]any), + Grants: make(map[string]any), + Group: make(map[string]any), + GroupInstanceProfile: make(map[string]any), + GroupMember: make(map[string]any), + GroupRole: make(map[string]any), + InstancePool: make(map[string]any), + InstanceProfile: make(map[string]any), + IpAccessList: make(map[string]any), + Job: make(map[string]any), + Library: make(map[string]any), + Metastore: make(map[string]any), + MetastoreAssignment: make(map[string]any), + MetastoreDataAccess: make(map[string]any), + MlflowExperiment: make(map[string]any), + MlflowModel: make(map[string]any), + MlflowWebhook: make(map[string]any), + ModelServing: make(map[string]any), + Mount: make(map[string]any), + MwsCredentials: make(map[string]any), + MwsCustomerManagedKeys: make(map[string]any), + MwsLogDelivery: make(map[string]any), + MwsNetworks: make(map[string]any), + MwsPermissionAssignment: make(map[string]any), + MwsPrivateAccessSettings: make(map[string]any), + MwsStorageConfigurations: make(map[string]any), + MwsVpcEndpoint: make(map[string]any), + MwsWorkspaces: make(map[string]any), + Notebook: make(map[string]any), + OboToken: make(map[string]any), + PermissionAssignment: make(map[string]any), + Permissions: make(map[string]any), + Pipeline: make(map[string]any), + Provider: make(map[string]any), + Recipient: make(map[string]any), + RegisteredModel: make(map[string]any), + Repo: make(map[string]any), + Schema: make(map[string]any), + Secret: make(map[string]any), + SecretAcl: make(map[string]any), + SecretScope: make(map[string]any), + ServicePrincipal: make(map[string]any), + ServicePrincipalRole: make(map[string]any), + ServicePrincipalSecret: make(map[string]any), + Share: make(map[string]any), + SqlAlert: make(map[string]any), + SqlDashboard: make(map[string]any), + SqlEndpoint: make(map[string]any), + SqlGlobalConfig: make(map[string]any), + SqlPermissions: make(map[string]any), + SqlQuery: make(map[string]any), + SqlTable: make(map[string]any), + SqlVisualization: make(map[string]any), + SqlWidget: make(map[string]any), + StorageCredential: make(map[string]any), + SystemSchema: make(map[string]any), + Table: make(map[string]any), + Token: make(map[string]any), + User: make(map[string]any), + UserInstanceProfile: make(map[string]any), + UserRole: make(map[string]any), + Volume: make(map[string]any), + WorkspaceConf: make(map[string]any), + WorkspaceFile: make(map[string]any), } } From ea8daf1f97d66ec88d72fd7b9a7d286a80d68f90 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 16 Feb 2024 13:56:02 +0100 Subject: [PATCH 038/286] Avoid infinite recursion when normalizing a recursive type (#1213) ## Changes This is a follow-up to #1211 prompted by the addition of a recursive type in the Go SDK v0.31.0 (`jobs.ForEachTask`). When populating missing fields with their zero values we must not inadvertently recurse into a recursive type. ## Tests New unit test fails with a stack overflow if the fix if the check is disabled. 
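To make the recursion hazard concrete, here is a minimal standalone Go sketch. It is not the CLI's actual normalize code; the `Node` type and `fillZero` helper are hypothetical names used only for illustration of the `seen`-list guard this patch adds:

```go
package main

import (
	"fmt"
	"reflect"
	"slices"
)

// Node is recursive: naively materializing zero values for every missing
// field would descend Node -> *Node -> Node -> ... forever.
type Node struct {
	Child *Node  `json:"child"`
	Name  string `json:"name"`
}

// fillZero builds a map describing the zero value of typ, skipping any struct
// type that is already being visited higher up the descent path.
func fillZero(typ reflect.Type, seen []reflect.Type) any {
	for typ.Kind() == reflect.Pointer {
		typ = typ.Elem()
	}
	switch typ.Kind() {
	case reflect.Struct:
		if slices.Contains(seen, typ) {
			// Type is already on the path: stop instead of recursing forever.
			return nil
		}
		seen = append(seen, typ)
		out := map[string]any{}
		for i := 0; i < typ.NumField(); i++ {
			if v := fillZero(typ.Field(i).Type, seen); v != nil {
				out[typ.Field(i).Name] = v
			}
		}
		return out
	case reflect.String:
		return ""
	case reflect.Bool:
		return false
	default:
		// Other kinds are out of scope for this sketch.
		return nil
	}
}

func main() {
	fmt.Println(fillZero(reflect.TypeOf(Node{}), nil))
}
```

Running the sketch prints `map[Name:]`: the recursive `Child` field is skipped rather than overflowing the stack, which is the behavior the check below enforces for types like `jobs.ForEachTask`.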
--- libs/dyn/convert/normalize.go | 41 +++++++++++++++++------------- libs/dyn/convert/normalize_test.go | 31 ++++++++++++++++++++++ 2 files changed, 55 insertions(+), 17 deletions(-) diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 26df09578..e0dfbda23 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -3,6 +3,7 @@ package convert import ( "fmt" "reflect" + "slices" "strconv" "github.com/databricks/cli/libs/diag" @@ -31,21 +32,21 @@ func Normalize(dst any, src dyn.Value, opts ...NormalizeOption) (dyn.Value, diag } } - return n.normalizeType(reflect.TypeOf(dst), src) + return n.normalizeType(reflect.TypeOf(dst), src, []reflect.Type{}) } -func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value, seen []reflect.Type) (dyn.Value, diag.Diagnostics) { for typ.Kind() == reflect.Pointer { typ = typ.Elem() } switch typ.Kind() { case reflect.Struct: - return n.normalizeStruct(typ, src) + return n.normalizeStruct(typ, src, append(seen, typ)) case reflect.Map: - return n.normalizeMap(typ, src) + return n.normalizeMap(typ, src, append(seen, typ)) case reflect.Slice: - return n.normalizeSlice(typ, src) + return n.normalizeSlice(typ, src, append(seen, typ)) case reflect.String: return n.normalizeString(typ, src) case reflect.Bool: @@ -67,7 +68,7 @@ func typeMismatch(expected dyn.Kind, src dyn.Value) diag.Diagnostic { } } -func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen []reflect.Type) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -86,7 +87,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value) (dyn. } // Normalize the value according to the field type. - v, err := n.normalizeType(typ.FieldByIndex(index).Type, v) + v, err := n.normalizeType(typ.FieldByIndex(index).Type, v, seen) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. @@ -115,20 +116,26 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value) (dyn. ftyp = ftyp.Elem() } + // Skip field if we have already seen its type to avoid infinite recursion + // when filling in the zero value of a recursive type. + if slices.Contains(seen, ftyp) { + continue + } + var v dyn.Value switch ftyp.Kind() { case reflect.Struct, reflect.Map: - v, _ = n.normalizeType(ftyp, dyn.V(map[string]dyn.Value{})) + v, _ = n.normalizeType(ftyp, dyn.V(map[string]dyn.Value{}), seen) case reflect.Slice: - v, _ = n.normalizeType(ftyp, dyn.V([]dyn.Value{})) + v, _ = n.normalizeType(ftyp, dyn.V([]dyn.Value{}), seen) case reflect.String: - v, _ = n.normalizeType(ftyp, dyn.V("")) + v, _ = n.normalizeType(ftyp, dyn.V(""), seen) case reflect.Bool: - v, _ = n.normalizeType(ftyp, dyn.V(false)) + v, _ = n.normalizeType(ftyp, dyn.V(false), seen) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - v, _ = n.normalizeType(ftyp, dyn.V(int64(0))) + v, _ = n.normalizeType(ftyp, dyn.V(int64(0)), seen) case reflect.Float32, reflect.Float64: - v, _ = n.normalizeType(ftyp, dyn.V(float64(0))) + v, _ = n.normalizeType(ftyp, dyn.V(float64(0)), seen) default: // Skip fields for which we do not have a natural [dyn.Value] equivalent. // For example, we don't handle reflect.Complex* and reflect.Uint* types. 
@@ -147,7 +154,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value) (dyn. return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src)) } -func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []reflect.Type) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -155,7 +162,7 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Val out := make(map[string]dyn.Value) for k, v := range src.MustMap() { // Normalize the value according to the map element type. - v, err := n.normalizeType(typ.Elem(), v) + v, err := n.normalizeType(typ.Elem(), v, seen) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. @@ -175,7 +182,7 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Val return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src)) } -func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen []reflect.Type) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -183,7 +190,7 @@ func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.V out := make([]dyn.Value, 0, len(src.MustSequence())) for _, v := range src.MustSequence() { // Normalize the value according to the slice element type. - v, err := n.normalizeType(typ.Elem(), v) + v, err := n.normalizeType(typ.Elem(), v, seen) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index d59cc3b35..82abc8260 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -189,6 +189,37 @@ func TestNormalizeStructIncludeMissingFields(t *testing.T) { }), vout) } +func TestNormalizeStructIncludeMissingFieldsOnRecursiveType(t *testing.T) { + type Tmp struct { + // Verify that structs are recursively normalized if not set. + Ptr *Tmp `json:"ptr"` + + // Verify that primitive types are zero-initialized if not set. + String string `json:"string"` + } + + var typ Tmp + vin := dyn.V(map[string]dyn.Value{ + "ptr": dyn.V(map[string]dyn.Value{ + "ptr": dyn.V(map[string]dyn.Value{ + "string": dyn.V("already set"), + }), + }), + }) + vout, err := Normalize(typ, vin, IncludeMissingFields) + assert.Empty(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "ptr": dyn.V(map[string]dyn.Value{ + "ptr": dyn.V(map[string]dyn.Value{ + // Note: the ptr field is not zero-initialized because that would recurse. + "string": dyn.V("already set"), + }), + "string": dyn.V(""), + }), + "string": dyn.V(""), + }), vout) +} + func TestNormalizeMap(t *testing.T) { var typ map[string]string vin := dyn.V(map[string]dyn.Value{ From 5f59572cb300b222dbd3aef8f414de8032ff4936 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 16 Feb 2024 17:19:40 +0100 Subject: [PATCH 039/286] Fix issue where interpolating a new ref would rewrite unrelated fields (#1217) ## Changes When resolving a value returned by the lookup function, the code would call into `resolveRef` with the key that `resolveKey` was called with. In doing so, it would cache the _new_ ref under that key. 
We fix this by caching ref resolution only at the top level and relying on lookup caching to avoid duplicate work. This came up while testing #1098. ## Tests Unit test. --- libs/dyn/dynvar/resolve.go | 18 +++++---------- libs/dyn/dynvar/resolve_test.go | 40 +++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/libs/dyn/dynvar/resolve.go b/libs/dyn/dynvar/resolve.go index b5417cac2..b8a0aef62 100644 --- a/libs/dyn/dynvar/resolve.go +++ b/libs/dyn/dynvar/resolve.go @@ -105,21 +105,17 @@ func (r *resolver) resolveVariableReferences() (err error) { keys := maps.Keys(r.refs) sort.Strings(keys) for _, key := range keys { - _, err := r.resolveRef(key, r.refs[key], []string{key}) + v, err := r.resolveRef(r.refs[key], []string{key}) if err != nil { return err } + r.resolved[key] = v } return nil } -func (r *resolver) resolveRef(key string, ref ref, seen []string) (dyn.Value, error) { - // Check if we have already resolved this variable reference. - if v, ok := r.resolved[key]; ok { - return v, nil - } - +func (r *resolver) resolveRef(ref ref, seen []string) (dyn.Value, error) { // This is an unresolved variable reference. deps := ref.references() @@ -154,7 +150,6 @@ func (r *resolver) resolveRef(key string, ref ref, seen []string) (dyn.Value, er if ref.isPure() && complete { // If the variable reference is pure, we can substitute it. // This is useful for interpolating values of non-string types. - r.resolved[key] = resolved[0] return resolved[0], nil } @@ -178,10 +173,7 @@ func (r *resolver) resolveRef(key string, ref ref, seen []string) (dyn.Value, er ref.str = strings.Replace(ref.str, ref.matches[j][0], s, 1) } - // Store the interpolated value. - v := dyn.NewValue(ref.str, ref.value.Location()) - r.resolved[key] = v - return v, nil + return dyn.NewValue(ref.str, ref.value.Location()), nil } func (r *resolver) resolveKey(key string, seen []string) (dyn.Value, error) { @@ -211,7 +203,7 @@ func (r *resolver) resolveKey(key string, seen []string) (dyn.Value, error) { // If the returned value is a valid variable reference, resolve it. ref, ok := newRef(v) if ok { - v, err = r.resolveRef(key, ref, seen) + v, err = r.resolveRef(ref, seen) } // Cache the return value and return to the caller. diff --git a/libs/dyn/dynvar/resolve_test.go b/libs/dyn/dynvar/resolve_test.go index 1234b7cbf..304ed9391 100644 --- a/libs/dyn/dynvar/resolve_test.go +++ b/libs/dyn/dynvar/resolve_test.go @@ -207,3 +207,43 @@ func TestResolveWithSkipEverything(t *testing.T) { assert.Equal(t, "${b} ${a} ${a} ${b}", getByPath(t, out, "f").MustString()) assert.Equal(t, "${d} ${c} ${c} ${d}", getByPath(t, out, "g").MustString()) } + +func TestResolveWithInterpolateNewRef(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "a": dyn.V("a"), + "b": dyn.V("${a}"), + }) + + // The call replaces ${a} with ${foobar} and skips everything else. + out, err := dynvar.Resolve(in, func(path dyn.Path) (dyn.Value, error) { + if path.String() == "a" { + return dyn.V("${foobar}"), nil + } + return dyn.InvalidValue, dynvar.ErrSkipResolution + }) + + require.NoError(t, err) + assert.Equal(t, "a", getByPath(t, out, "a").MustString()) + assert.Equal(t, "${foobar}", getByPath(t, out, "b").MustString()) +} + +func TestResolveWithInterpolateAliasedRef(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "a": dyn.V("a"), + "b": dyn.V("${a}"), + "c": dyn.V("${x}"), + }) + + // The call replaces ${x} with ${b} and skips everything else. 
+ out, err := dynvar.Resolve(in, func(path dyn.Path) (dyn.Value, error) { + if path.String() == "x" { + return dyn.V("${b}"), nil + } + return dyn.GetByPath(in, path) + }) + + require.NoError(t, err) + assert.Equal(t, "a", getByPath(t, out, "a").MustString()) + assert.Equal(t, "a", getByPath(t, out, "b").MustString()) + assert.Equal(t, "a", getByPath(t, out, "c").MustString()) +} From 87dd46a3f8a6524877eb47246938b6ed22d62537 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 16 Feb 2024 20:41:58 +0100 Subject: [PATCH 040/286] Use dynamic configuration model in bundles (#1098) ## Changes This is a fundamental change to how we load and process bundle configuration. We now depend on the configuration being represented as a `dyn.Value`. This representation is functionally equivalent to Go's `any` (it is variadic) and allows us to capture metadata associated with a value, such as where it was defined (e.g. file, line, and column). It also allows us to represent Go's zero values properly (e.g. empty string, integer equal to 0, or boolean false). Using this representation allows us to let the configuration model deviate from the typed structure we have been relying on so far (`config.Root`). We need to deviate from these types when using variables for fields that are not a string themselves. For example, using `${var.num_workers}` for an integer `workers` field was impossible until now (though not implemented in this change). The loader for a `dyn.Value` includes functionality to capture any and all type mismatches between the user-defined configuration and the expected types. These mismatches can be surfaced as validation errors in future PRs. Given that many mutators expect the typed struct to be the source of truth, this change converts between the dynamic representation and the typed representation on mutator entry and exit. Existing mutators can continue to modify the typed representation and these modifications are reflected in the dynamic representation (see `MarkMutatorEntry` and `MarkMutatorExit` in `bundle/config/root.go`). Required changes included in this change: * The existing interpolation package is removed in favor of `libs/dyn/dynvar`. * Functionality to merge job clusters, job tasks, and pipeline clusters are now all broken out into their own mutators. To be implemented later: * Allow variable references for non-string types. * Surface diagnostics about the configuration provided by the user in the validation output. * Some mutators use a resource's configuration file path to resolve related relative paths. These depend on `bundle/config/paths.Path` being set and populated through `ConfigureConfigFilePath`. Instead, they should interact with the dynamically typed configuration directly. Doing this also unlocks being able to differentiate different base paths used within a job (e.g. a task override with a relative path defined in a directory other than the base job). 
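As a small illustration of the dynamic representation described above, the sketch below holds a tiny configuration tree as a `dyn.Value` and resolves a `${...}` reference against it. It only uses the `dyn`/`dynvar` calls that appear in the diffs of this series (the `MustMap`/`MustString` accessors are assumed from their use elsewhere in these patches), and the `name`/`suffix` keys are invented for the example; treat it as a sketch, not the bundle's actual wiring:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/dynvar"
)

func main() {
	// A tiny configuration tree held as a dynamic value rather than a typed struct.
	in := dyn.V(map[string]dyn.Value{
		"name":   dyn.V("my_job"),
		"suffix": dyn.V("${name}_dev"),
	})

	// Resolve ${...} references by looking them up in the same tree.
	out, err := dynvar.Resolve(in, func(path dyn.Path) (dyn.Value, error) {
		return dyn.GetByPath(in, path)
	})
	if err != nil {
		panic(err)
	}

	// Prints "my_job_dev".
	fmt.Println(out.MustMap()["suffix"].MustString())
}
```

This resolution mechanism in `libs/dyn/dynvar` is what replaces the `bundle/config/interpolation` package removed further down in this patch.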
## Tests * Existing unit tests pass (some have been modified to accommodate) * Integration tests pass --- NOTICE | 5 - bundle/config/artifact.go | 4 +- bundle/config/git.go | 4 +- bundle/config/interpolation/interpolation.go | 254 ----------- .../interpolation/interpolation_test.go | 251 ----------- bundle/config/interpolation/lookup.go | 51 --- bundle/config/interpolation/lookup_test.go | 81 ---- bundle/config/interpolation/setter.go | 48 -- bundle/config/mutator/environments_compat.go | 63 +++ .../mutator/environments_compat_test.go | 65 +++ .../expand_pipeline_glob_paths_test.go | 7 +- bundle/config/mutator/merge_job_clusters.go | 42 ++ .../config/mutator/merge_job_clusters_test.go | 105 +++++ bundle/config/mutator/merge_job_tasks.go | 42 ++ bundle/config/mutator/merge_job_tasks_test.go | 117 +++++ .../config/mutator/merge_pipeline_clusters.go | 45 ++ .../mutator/merge_pipeline_clusters_test.go | 125 ++++++ bundle/config/mutator/mutator.go | 8 +- .../config/mutator/override_compute_test.go | 4 +- .../mutator/process_target_mode_test.go | 2 +- .../mutator/resolve_variable_references.go | 81 ++++ .../resolve_variable_references_test.go | 97 +++++ bundle/config/mutator/rewrite_sync_paths.go | 58 +++ .../config/mutator/rewrite_sync_paths_test.go | 103 +++++ bundle/config/mutator/select_target.go | 4 +- bundle/config/mutator/translate_paths_test.go | 71 ++- bundle/config/paths/paths.go | 13 + bundle/config/resources.go | 37 +- bundle/config/resources/job.go | 67 --- bundle/config/resources/job_test.go | 116 ----- bundle/config/resources/pipeline.go | 49 --- bundle/config/resources/pipeline_test.go | 76 ---- bundle/config/root.go | 410 +++++++++++++----- bundle/config/root_test.go | 54 +-- bundle/config/sync.go | 18 - bundle/config/target.go | 3 +- bundle/config/variable/variable.go | 2 - bundle/deploy/metadata/compute_test.go | 18 +- bundle/deploy/terraform/interpolate.go | 84 ++-- bundle/deploy/terraform/interpolate_test.go | 92 ++++ bundle/internal/bundletest/location.go | 34 ++ bundle/mutator.go | 33 +- bundle/phases/build.go | 6 +- bundle/phases/initialize.go | 14 +- .../tests/bundle/pipeline_glob_paths_test.go | 24 +- .../resources/databricks.yml | 2 - bundle/tests/environment_overrides_test.go | 7 +- bundle/tests/interpolation_test.go | 14 +- bundle/tests/job_with_spark_conf_test.go | 19 +- bundle/tests/loader.go | 13 +- bundle/tests/override_sync_test.go | 24 +- .../tests/relative_path_with_includes_test.go | 20 +- bundle/tests/run_as/databricks.yml | 20 +- bundle/tests/run_as_test.go | 46 +- bundle/tests/variables_test.go | 55 +-- cmd/bundle/deploy.go | 23 +- cmd/bundle/deployment/bind.go | 16 +- cmd/bundle/deployment/unbind.go | 14 +- cmd/bundle/destroy.go | 13 +- cmd/bundle/utils/utils.go | 6 +- cmd/bundle/validate.go | 7 + cmd/root/bundle.go | 8 +- go.mod | 1 - go.sum | 2 - internal/bundle/artifacts_test.go | 29 +- libs/dyn/merge/elements_by_key.go | 67 +++ libs/dyn/merge/elements_by_key_test.go | 52 +++ libs/dyn/value.go | 9 + libs/template/renderer_test.go | 6 +- 69 files changed, 1908 insertions(+), 1452 deletions(-) delete mode 100644 bundle/config/interpolation/interpolation.go delete mode 100644 bundle/config/interpolation/interpolation_test.go delete mode 100644 bundle/config/interpolation/lookup.go delete mode 100644 bundle/config/interpolation/lookup_test.go delete mode 100644 bundle/config/interpolation/setter.go create mode 100644 bundle/config/mutator/environments_compat.go create mode 100644 bundle/config/mutator/environments_compat_test.go create mode 100644 
bundle/config/mutator/merge_job_clusters.go create mode 100644 bundle/config/mutator/merge_job_clusters_test.go create mode 100644 bundle/config/mutator/merge_job_tasks.go create mode 100644 bundle/config/mutator/merge_job_tasks_test.go create mode 100644 bundle/config/mutator/merge_pipeline_clusters.go create mode 100644 bundle/config/mutator/merge_pipeline_clusters_test.go create mode 100644 bundle/config/mutator/resolve_variable_references.go create mode 100644 bundle/config/mutator/resolve_variable_references_test.go create mode 100644 bundle/config/mutator/rewrite_sync_paths.go create mode 100644 bundle/config/mutator/rewrite_sync_paths_test.go delete mode 100644 bundle/config/resources/job_test.go delete mode 100644 bundle/config/resources/pipeline_test.go create mode 100644 bundle/deploy/terraform/interpolate_test.go create mode 100644 bundle/internal/bundletest/location.go create mode 100644 libs/dyn/merge/elements_by_key.go create mode 100644 libs/dyn/merge/elements_by_key_test.go diff --git a/NOTICE b/NOTICE index 71ba7fbcc..fdc2a88cf 100644 --- a/NOTICE +++ b/NOTICE @@ -57,11 +57,6 @@ google/uuid - https://github.com/google/uuid Copyright (c) 2009,2014 Google Inc. All rights reserved. License - https://github.com/google/uuid/blob/master/LICENSE -imdario/mergo - https://github.com/imdario/mergo -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. -License - https://github.com/imdario/mergo/blob/master/LICENSE - manifoldco/promptui - https://github.com/manifoldco/promptui Copyright (c) 2017, Arigato Machine Inc. All rights reserved. License - https://github.com/manifoldco/promptui/blob/master/LICENSE.md diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index dbf327fa0..219def571 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -10,9 +10,9 @@ import ( type Artifacts map[string]*Artifact -func (artifacts Artifacts) SetConfigFilePath(path string) { +func (artifacts Artifacts) ConfigureConfigFilePath() { for _, artifact := range artifacts { - artifact.ConfigFilePath = path + artifact.ConfigureConfigFilePath() } } diff --git a/bundle/config/git.go b/bundle/config/git.go index 58a5d54d2..f9f2f83e5 100644 --- a/bundle/config/git.go +++ b/bundle/config/git.go @@ -9,8 +9,8 @@ type Git struct { BundleRootPath string `json:"bundle_root_path,omitempty" bundle:"readonly"` // Inferred is set to true if the Git details were inferred and weren't set explicitly - Inferred bool `json:"-" bundle:"readonly"` + Inferred bool `json:"inferred,omitempty" bundle:"readonly"` // The actual branch according to Git (may be different from the configured branch) - ActualBranch string `json:"-" bundle:"readonly"` + ActualBranch string `json:"actual_branch,omitempty" bundle:"readonly"` } diff --git a/bundle/config/interpolation/interpolation.go b/bundle/config/interpolation/interpolation.go deleted file mode 100644 index 8ba0b8b1f..000000000 --- a/bundle/config/interpolation/interpolation.go +++ /dev/null @@ -1,254 +0,0 @@ -package interpolation - -import ( - "context" - "errors" - "fmt" - "reflect" - "regexp" - "sort" - "strings" - - "slices" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/variable" - "golang.org/x/exp/maps" -) - -const Delimiter = "." 
- -// must start with alphabet, support hyphens and underscores in middle but must end with character -var re = regexp.MustCompile(`\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}`) - -type stringField struct { - path string - - getter - setter -} - -func newStringField(path string, g getter, s setter) *stringField { - return &stringField{ - path: path, - - getter: g, - setter: s, - } -} - -func (s *stringField) dependsOn() []string { - var out []string - m := re.FindAllStringSubmatch(s.Get(), -1) - for i := range m { - out = append(out, m[i][1]) - } - return out -} - -func (s *stringField) interpolate(fns []LookupFunction, lookup map[string]string) { - out := re.ReplaceAllStringFunc(s.Get(), func(s string) string { - // Turn the whole match into the submatch. - match := re.FindStringSubmatch(s) - for _, fn := range fns { - v, err := fn(match[1], lookup) - if errors.Is(err, ErrSkipInterpolation) { - continue - } - if err != nil { - panic(err) - } - return v - } - - // No substitution. - return s - }) - - s.Set(out) -} - -type accumulator struct { - // all string fields in the bundle config - strings map[string]*stringField - - // contains path -> resolved_string mapping for string fields in the config - // The resolved strings will NOT contain any variable references that could - // have been resolved, however there might still be references that cannot - // be resolved - memo map[string]string -} - -// jsonFieldName returns the name in a field's `json` tag. -// Returns the empty string if it isn't set. -func jsonFieldName(sf reflect.StructField) string { - tag, ok := sf.Tag.Lookup("json") - if !ok { - return "" - } - parts := strings.Split(tag, ",") - if parts[0] == "-" { - return "" - } - return parts[0] -} - -func (a *accumulator) walkStruct(scope []string, rv reflect.Value) { - num := rv.NumField() - for i := 0; i < num; i++ { - sf := rv.Type().Field(i) - f := rv.Field(i) - - // Walk field with the same scope for anonymous (embedded) fields. - if sf.Anonymous { - a.walk(scope, f, anySetter{f}) - continue - } - - // Skip unnamed fields. - fieldName := jsonFieldName(rv.Type().Field(i)) - if fieldName == "" { - continue - } - - a.walk(append(scope, fieldName), f, anySetter{f}) - } -} - -func (a *accumulator) walk(scope []string, rv reflect.Value, s setter) { - // Dereference pointer. - if rv.Type().Kind() == reflect.Pointer { - // Skip nil pointers. - if rv.IsNil() { - return - } - rv = rv.Elem() - s = anySetter{rv} - } - - switch rv.Type().Kind() { - case reflect.String: - path := strings.Join(scope, Delimiter) - a.strings[path] = newStringField(path, anyGetter{rv}, s) - - // register alias for variable value. 
`var.foo` would be the alias for - // `variables.foo.value` - if len(scope) == 3 && scope[0] == "variables" && scope[2] == "value" { - aliasPath := strings.Join([]string{variable.VariableReferencePrefix, scope[1]}, Delimiter) - a.strings[aliasPath] = a.strings[path] - } - case reflect.Struct: - a.walkStruct(scope, rv) - case reflect.Map: - if rv.Type().Key().Kind() != reflect.String { - panic("only support string keys in map") - } - keys := rv.MapKeys() - for _, key := range keys { - a.walk(append(scope, key.String()), rv.MapIndex(key), mapSetter{rv, key}) - } - case reflect.Slice: - n := rv.Len() - name := scope[len(scope)-1] - base := scope[:len(scope)-1] - for i := 0; i < n; i++ { - element := rv.Index(i) - a.walk(append(base, fmt.Sprintf("%s[%d]", name, i)), element, anySetter{element}) - } - } -} - -// walk and gather all string fields in the config -func (a *accumulator) start(v any) { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Pointer { - panic("expect pointer") - } - rv = rv.Elem() - if rv.Type().Kind() != reflect.Struct { - panic("expect struct") - } - - a.strings = make(map[string]*stringField) - a.memo = make(map[string]string) - a.walk([]string{}, rv, nilSetter{}) -} - -// recursively interpolate variables in a depth first manner -func (a *accumulator) Resolve(path string, seenPaths []string, fns ...LookupFunction) error { - // return early if the path is already resolved - if _, ok := a.memo[path]; ok { - return nil - } - - // fetch the string node to resolve - field, ok := a.strings[path] - if !ok { - return fmt.Errorf("no value found for interpolation reference: ${%s}", path) - } - - // return early if the string field has no variables to interpolate - if len(field.dependsOn()) == 0 { - a.memo[path] = field.Get() - return nil - } - - // resolve all variables refered in the root string field - for _, childFieldPath := range field.dependsOn() { - // error if there is a loop in variable interpolation - if slices.Contains(seenPaths, childFieldPath) { - return fmt.Errorf("cycle detected in field resolution: %s", strings.Join(append(seenPaths, childFieldPath), " -> ")) - } - - // recursive resolve variables in the child fields - err := a.Resolve(childFieldPath, append(seenPaths, childFieldPath), fns...) - if err != nil { - return err - } - } - - // interpolate root string once all variable references in it have been resolved - field.interpolate(fns, a.memo) - - // record interpolated string in memo - a.memo[path] = field.Get() - return nil -} - -// Interpolate all string fields in the config -func (a *accumulator) expand(fns ...LookupFunction) error { - // sorting paths for stable order of iteration - paths := maps.Keys(a.strings) - sort.Strings(paths) - - // iterate over paths for all strings fields in the config - for _, path := range paths { - err := a.Resolve(path, []string{path}, fns...) - if err != nil { - return err - } - } - return nil -} - -type interpolate struct { - fns []LookupFunction -} - -func (m *interpolate) expand(v any) error { - a := accumulator{} - a.start(v) - return a.expand(m.fns...) 
-} - -func Interpolate(fns ...LookupFunction) bundle.Mutator { - return &interpolate{fns: fns} -} - -func (m *interpolate) Name() string { - return "Interpolate" -} - -func (m *interpolate) Apply(_ context.Context, b *bundle.Bundle) error { - return m.expand(&b.Config) -} diff --git a/bundle/config/interpolation/interpolation_test.go b/bundle/config/interpolation/interpolation_test.go deleted file mode 100644 index cccb6dc71..000000000 --- a/bundle/config/interpolation/interpolation_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package interpolation - -import ( - "testing" - - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/variable" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type nest struct { - X string `json:"x"` - Y *string `json:"y"` - Z map[string]string `json:"z"` -} - -type foo struct { - A string `json:"a"` - B string `json:"b"` - C string `json:"c"` - - // Pointer field - D *string `json:"d"` - - // Struct field - E nest `json:"e"` - - // Map field - F map[string]string `json:"f"` -} - -func expand(v any) error { - a := accumulator{} - a.start(v) - return a.expand(DefaultLookup) -} - -func TestInterpolationVariables(t *testing.T) { - f := foo{ - A: "a", - B: "${a}", - C: "${a}", - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "a", f.B) - assert.Equal(t, "a", f.C) -} - -func TestInterpolationVariablesSpecialChars(t *testing.T) { - type bar struct { - A string `json:"a-b"` - B string `json:"b_c"` - C string `json:"c-_a"` - } - f := bar{ - A: "a", - B: "${a-b}", - C: "${a-b}", - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "a", f.B) - assert.Equal(t, "a", f.C) -} - -func TestInterpolationValidMatches(t *testing.T) { - expectedMatches := map[string]string{ - "${hello_world.world_world}": "hello_world.world_world", - "${helloworld.world-world}": "helloworld.world-world", - "${hello-world.world-world}": "hello-world.world-world", - } - for interpolationStr, expectedMatch := range expectedMatches { - match := re.FindStringSubmatch(interpolationStr) - assert.True(t, len(match) > 0, - "Failed to match %s and find %s", interpolationStr, expectedMatch) - assert.Equal(t, expectedMatch, match[1], - "Failed to match the exact pattern %s and find %s", interpolationStr, expectedMatch) - } -} - -func TestInterpolationInvalidMatches(t *testing.T) { - invalidMatches := []string{ - "${hello_world-.world_world}", // the first segment ending must not end with hyphen (-) - "${hello_world-_.world_world}", // the first segment ending must not end with underscore (_) - "${helloworld.world-world-}", // second segment must not end with hyphen (-) - "${helloworld-.world-world}", // first segment must not end with hyphen (-) - "${helloworld.-world-world}", // second segment must not start with hyphen (-) - "${-hello-world.-world-world-}", // must not start or end with hyphen (-) - "${_-_._-_.id}", // cannot use _- in sequence - "${0helloworld.world-world}", // interpolated first section shouldn't start with number - "${helloworld.9world-world}", // interpolated second section shouldn't start with number - "${a-a.a-_a-a.id}", // fails because of -_ in the second segment - "${a-a.a--a-a.id}", // fails because of -- in the second segment - } - for _, invalidMatch := range invalidMatches { - match := re.FindStringSubmatch(invalidMatch) - assert.True(t, len(match) == 0, "Should be invalid interpolation: %s", invalidMatch) - } -} - -func 
TestInterpolationWithPointers(t *testing.T) { - fd := "${a}" - f := foo{ - A: "a", - D: &fd, - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "a", *f.D) -} - -func TestInterpolationWithStruct(t *testing.T) { - fy := "${e.x}" - f := foo{ - A: "${e.x}", - E: nest{ - X: "x", - Y: &fy, - }, - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "x", f.A) - assert.Equal(t, "x", f.E.X) - assert.Equal(t, "x", *f.E.Y) -} - -func TestInterpolationWithMap(t *testing.T) { - f := foo{ - A: "${f.a}", - F: map[string]string{ - "a": "a", - "b": "${f.a}", - }, - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "a", f.F["a"]) - assert.Equal(t, "a", f.F["b"]) -} - -func TestInterpolationWithResursiveVariableReferences(t *testing.T) { - f := foo{ - A: "a", - B: "(${a})", - C: "${a} ${b}", - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "(a)", f.B) - assert.Equal(t, "a (a)", f.C) -} - -func TestInterpolationVariableLoopError(t *testing.T) { - d := "${b}" - f := foo{ - A: "a", - B: "${c}", - C: "${d}", - D: &d, - } - - err := expand(&f) - assert.ErrorContains(t, err, "cycle detected in field resolution: b -> c -> d -> b") -} - -func TestInterpolationForVariables(t *testing.T) { - foo := "abc" - bar := "${var.foo} def" - apple := "${var.foo} ${var.bar}" - config := config.Root{ - Variables: map[string]*variable.Variable{ - "foo": { - Value: &foo, - }, - "bar": { - Value: &bar, - }, - "apple": { - Value: &apple, - }, - }, - Bundle: config.Bundle{ - Name: "${var.apple} ${var.foo}", - }, - } - - err := expand(&config) - assert.NoError(t, err) - assert.Equal(t, "abc", *(config.Variables["foo"].Value)) - assert.Equal(t, "abc def", *(config.Variables["bar"].Value)) - assert.Equal(t, "abc abc def", *(config.Variables["apple"].Value)) - assert.Equal(t, "abc abc def abc", config.Bundle.Name) -} - -func TestInterpolationLoopForVariables(t *testing.T) { - foo := "${var.bar}" - bar := "${var.foo}" - config := config.Root{ - Variables: map[string]*variable.Variable{ - "foo": { - Value: &foo, - }, - "bar": { - Value: &bar, - }, - }, - Bundle: config.Bundle{ - Name: "${var.foo}", - }, - } - - err := expand(&config) - assert.ErrorContains(t, err, "cycle detected in field resolution: bundle.name -> var.foo -> var.bar -> var.foo") -} - -func TestInterpolationInvalidVariableReference(t *testing.T) { - foo := "abc" - config := config.Root{ - Variables: map[string]*variable.Variable{ - "foo": { - Value: &foo, - }, - }, - Bundle: config.Bundle{ - Name: "${vars.foo}", - }, - } - - err := expand(&config) - assert.ErrorContains(t, err, "no value found for interpolation reference: ${vars.foo}") -} diff --git a/bundle/config/interpolation/lookup.go b/bundle/config/interpolation/lookup.go deleted file mode 100644 index 3dc5047a7..000000000 --- a/bundle/config/interpolation/lookup.go +++ /dev/null @@ -1,51 +0,0 @@ -package interpolation - -import ( - "errors" - "fmt" - "slices" - "strings" -) - -// LookupFunction returns the value to rewrite a path expression to. -type LookupFunction func(path string, depends map[string]string) (string, error) - -// ErrSkipInterpolation can be used to fall through from [LookupFunction]. -var ErrSkipInterpolation = errors.New("skip interpolation") - -// DefaultLookup looks up the specified path in the map. -// It returns an error if it doesn't exist. 
-func DefaultLookup(path string, lookup map[string]string) (string, error) { - v, ok := lookup[path] - if !ok { - return "", fmt.Errorf("expected to find value for path: %s", path) - } - return v, nil -} - -func pathPrefixMatches(prefix []string, path string) bool { - parts := strings.Split(path, Delimiter) - return len(parts) >= len(prefix) && slices.Compare(prefix, parts[0:len(prefix)]) == 0 -} - -// ExcludeLookupsInPath is a lookup function that skips lookups for the specified path. -func ExcludeLookupsInPath(exclude ...string) LookupFunction { - return func(path string, lookup map[string]string) (string, error) { - if pathPrefixMatches(exclude, path) { - return "", ErrSkipInterpolation - } - - return DefaultLookup(path, lookup) - } -} - -// IncludeLookupsInPath is a lookup function that limits lookups to the specified path. -func IncludeLookupsInPath(include ...string) LookupFunction { - return func(path string, lookup map[string]string) (string, error) { - if !pathPrefixMatches(include, path) { - return "", ErrSkipInterpolation - } - - return DefaultLookup(path, lookup) - } -} diff --git a/bundle/config/interpolation/lookup_test.go b/bundle/config/interpolation/lookup_test.go deleted file mode 100644 index 61628bf04..000000000 --- a/bundle/config/interpolation/lookup_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package interpolation - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type interpolationFixture struct { - A map[string]string `json:"a"` - B map[string]string `json:"b"` - C map[string]string `json:"c"` -} - -func fixture() interpolationFixture { - return interpolationFixture{ - A: map[string]string{ - "x": "1", - }, - B: map[string]string{ - "x": "2", - }, - C: map[string]string{ - "ax": "${a.x}", - "bx": "${b.x}", - }, - } -} - -func TestExcludePath(t *testing.T) { - tmp := fixture() - m := interpolate{ - fns: []LookupFunction{ - ExcludeLookupsInPath("a"), - }, - } - - err := m.expand(&tmp) - require.NoError(t, err) - - assert.Equal(t, "1", tmp.A["x"]) - assert.Equal(t, "2", tmp.B["x"]) - assert.Equal(t, "${a.x}", tmp.C["ax"]) - assert.Equal(t, "2", tmp.C["bx"]) -} - -func TestIncludePath(t *testing.T) { - tmp := fixture() - m := interpolate{ - fns: []LookupFunction{ - IncludeLookupsInPath("a"), - }, - } - - err := m.expand(&tmp) - require.NoError(t, err) - - assert.Equal(t, "1", tmp.A["x"]) - assert.Equal(t, "2", tmp.B["x"]) - assert.Equal(t, "1", tmp.C["ax"]) - assert.Equal(t, "${b.x}", tmp.C["bx"]) -} - -func TestIncludePathMultiple(t *testing.T) { - tmp := fixture() - m := interpolate{ - fns: []LookupFunction{ - IncludeLookupsInPath("a"), - IncludeLookupsInPath("b"), - }, - } - - err := m.expand(&tmp) - require.NoError(t, err) - - assert.Equal(t, "1", tmp.A["x"]) - assert.Equal(t, "2", tmp.B["x"]) - assert.Equal(t, "1", tmp.C["ax"]) - assert.Equal(t, "2", tmp.C["bx"]) -} diff --git a/bundle/config/interpolation/setter.go b/bundle/config/interpolation/setter.go deleted file mode 100644 index cce39c611..000000000 --- a/bundle/config/interpolation/setter.go +++ /dev/null @@ -1,48 +0,0 @@ -package interpolation - -import "reflect" - -// String values in maps are not addressable and therefore not settable -// through Go's reflection mechanism. This interface solves this limitation -// by wrapping the setter differently for addressable values and map values. 
-type setter interface { - Set(string) -} - -type nilSetter struct{} - -func (nilSetter) Set(_ string) { - panic("nil setter") -} - -type anySetter struct { - rv reflect.Value -} - -func (s anySetter) Set(str string) { - s.rv.SetString(str) -} - -type mapSetter struct { - // map[string]string - m reflect.Value - - // key - k reflect.Value -} - -func (s mapSetter) Set(str string) { - s.m.SetMapIndex(s.k, reflect.ValueOf(str)) -} - -type getter interface { - Get() string -} - -type anyGetter struct { - rv reflect.Value -} - -func (g anyGetter) Get() string { - return g.rv.String() -} diff --git a/bundle/config/mutator/environments_compat.go b/bundle/config/mutator/environments_compat.go new file mode 100644 index 000000000..0eb996b14 --- /dev/null +++ b/bundle/config/mutator/environments_compat.go @@ -0,0 +1,63 @@ +package mutator + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" +) + +type environmentsToTargets struct{} + +func EnvironmentsToTargets() bundle.Mutator { + return &environmentsToTargets{} +} + +func (m *environmentsToTargets) Name() string { + return "EnvironmentsToTargets" +} + +func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) error { + // Short circuit if the "environments" key is not set. + // This is the common case. + if b.Config.Environments == nil { + return nil + } + + // The "environments" key is set; validate and rewrite it to "targets". + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + environments := v.Get("environments") + targets := v.Get("targets") + + // Return an error if both "environments" and "targets" are set. + if environments != dyn.NilValue && targets != dyn.NilValue { + return dyn.NilValue, fmt.Errorf( + "both 'environments' and 'targets' are specified; only 'targets' should be used: %s", + environments.Location().String(), + ) + } + + // Rewrite "environments" to "targets". + if environments != dyn.NilValue && targets == dyn.NilValue { + nv, err := dyn.Set(v, "targets", environments) + if err != nil { + return dyn.NilValue, err + } + // Drop the "environments" key. 
+ return dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + switch len(p) { + case 0: + return v, nil + case 1: + if p[0] == dyn.Key("environments") { + return v, dyn.ErrDrop + } + } + return v, dyn.ErrSkip + }) + } + + return v, nil + }) +} diff --git a/bundle/config/mutator/environments_compat_test.go b/bundle/config/mutator/environments_compat_test.go new file mode 100644 index 000000000..f7045b3df --- /dev/null +++ b/bundle/config/mutator/environments_compat_test.go @@ -0,0 +1,65 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/assert" +) + +func TestEnvironmentsToTargetsWithBothDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Environments: map[string]*config.Target{ + "name": { + Mode: config.Development, + }, + }, + Targets: map[string]*config.Target{ + "name": { + Mode: config.Development, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + assert.ErrorContains(t, err, `both 'environments' and 'targets' are specified;`) +} + +func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Environments: map[string]*config.Target{ + "name": { + Mode: config.Development, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + assert.NoError(t, err) + assert.Len(t, b.Config.Environments, 0) + assert.Len(t, b.Config.Targets, 1) +} + +func TestEnvironmentsToTargetsWithTargetsDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{ + "name": { + Mode: config.Development, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + assert.NoError(t, err) + assert.Len(t, b.Config.Environments, 0) + assert.Len(t, b.Config.Targets, 1) +} diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index ad86865af..e2cba80e2 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -8,8 +8,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/stretchr/testify/require" @@ -42,9 +42,6 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -98,6 +95,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + m := ExpandPipelineGlobPaths() err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) diff --git a/bundle/config/mutator/merge_job_clusters.go b/bundle/config/mutator/merge_job_clusters.go new file mode 100644 index 000000000..e8378f480 --- /dev/null +++ b/bundle/config/mutator/merge_job_clusters.go @@ -0,0 +1,42 @@ +package mutator + +import ( 
+ "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" +) + +type mergeJobClusters struct{} + +func MergeJobClusters() bundle.Mutator { + return &mergeJobClusters{} +} + +func (m *mergeJobClusters) Name() string { + return "MergeJobClusters" +} + +func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string { + switch v.Kind() { + case dyn.KindNil: + return "" + case dyn.KindString: + return v.MustString() + default: + panic("job cluster key must be a string") + } +} + +func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) error { + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + if v == dyn.NilValue { + return v, nil + } + + return dyn.Map(v, "resources.jobs", dyn.Foreach(func(job dyn.Value) (dyn.Value, error) { + return dyn.Map(job, "job_clusters", merge.ElementsByKey("job_cluster_key", m.jobClusterKey)) + })) + }) +} diff --git a/bundle/config/mutator/merge_job_clusters_test.go b/bundle/config/mutator/merge_job_clusters_test.go new file mode 100644 index 000000000..a32b70281 --- /dev/null +++ b/bundle/config/mutator/merge_job_clusters_test.go @@ -0,0 +1,105 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestMergeJobClusters(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "foo", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + }, + { + JobClusterKey: "bar", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "10.4.x-scala2.12", + }, + }, + { + JobClusterKey: "foo", + NewCluster: &compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.MergeJobClusters()) + assert.NoError(t, err) + + j := b.Config.Resources.Jobs["foo"] + + assert.Len(t, j.JobClusters, 2) + assert.Equal(t, "foo", j.JobClusters[0].JobClusterKey) + assert.Equal(t, "bar", j.JobClusters[1].JobClusterKey) + + // This job cluster was merged with a subsequent one. + jc0 := j.JobClusters[0].NewCluster + assert.Equal(t, "13.3.x-scala2.12", jc0.SparkVersion) + assert.Equal(t, "i3.2xlarge", jc0.NodeTypeId) + assert.Equal(t, 4, jc0.NumWorkers) + + // This job cluster was left untouched. 
+ jc1 := j.JobClusters[1].NewCluster + assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion) +} + +func TestMergeJobClustersWithNilKey(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + }, + { + NewCluster: &compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.MergeJobClusters()) + assert.NoError(t, err) + assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) +} diff --git a/bundle/config/mutator/merge_job_tasks.go b/bundle/config/mutator/merge_job_tasks.go new file mode 100644 index 000000000..7394368ab --- /dev/null +++ b/bundle/config/mutator/merge_job_tasks.go @@ -0,0 +1,42 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" +) + +type mergeJobTasks struct{} + +func MergeJobTasks() bundle.Mutator { + return &mergeJobTasks{} +} + +func (m *mergeJobTasks) Name() string { + return "MergeJobTasks" +} + +func (m *mergeJobTasks) taskKeyString(v dyn.Value) string { + switch v.Kind() { + case dyn.KindNil: + return "" + case dyn.KindString: + return v.MustString() + default: + panic("task key must be a string") + } +} + +func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) error { + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + if v == dyn.NilValue { + return v, nil + } + + return dyn.Map(v, "resources.jobs", dyn.Foreach(func(job dyn.Value) (dyn.Value, error) { + return dyn.Map(job, "tasks", merge.ElementsByKey("task_key", m.taskKeyString)) + })) + }) +} diff --git a/bundle/config/mutator/merge_job_tasks_test.go b/bundle/config/mutator/merge_job_tasks_test.go new file mode 100644 index 000000000..b3fb357e0 --- /dev/null +++ b/bundle/config/mutator/merge_job_tasks_test.go @@ -0,0 +1,117 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestMergeJobTasks(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "foo", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + Libraries: []compute.Library{ + {Whl: "package1"}, + }, + }, + { + TaskKey: "bar", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "10.4.x-scala2.12", + }, + }, + { + TaskKey: "foo", + NewCluster: &compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + Libraries: []compute.Library{ + {Pypi: &compute.PythonPyPiLibrary{ + Package: "package2", + }}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.MergeJobTasks()) + assert.NoError(t, err) + + j := b.Config.Resources.Jobs["foo"] + + assert.Len(t, j.Tasks, 2) + assert.Equal(t, "foo", 
j.Tasks[0].TaskKey) + assert.Equal(t, "bar", j.Tasks[1].TaskKey) + + // This task was merged with a subsequent one. + task0 := j.Tasks[0] + cluster := task0.NewCluster + assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion) + assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId) + assert.Equal(t, 4, cluster.NumWorkers) + assert.Len(t, task0.Libraries, 2) + assert.Equal(t, task0.Libraries[0].Whl, "package1") + assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2") + + // This task was left untouched. + task1 := j.Tasks[1].NewCluster + assert.Equal(t, "10.4.x-scala2.12", task1.SparkVersion) +} + +func TestMergeJobTasksWithNilKey(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + }, + { + NewCluster: &compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.MergeJobTasks()) + assert.NoError(t, err) + assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 1) +} diff --git a/bundle/config/mutator/merge_pipeline_clusters.go b/bundle/config/mutator/merge_pipeline_clusters.go new file mode 100644 index 000000000..777ce611b --- /dev/null +++ b/bundle/config/mutator/merge_pipeline_clusters.go @@ -0,0 +1,45 @@ +package mutator + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" +) + +type mergePipelineClusters struct{} + +func MergePipelineClusters() bundle.Mutator { + return &mergePipelineClusters{} +} + +func (m *mergePipelineClusters) Name() string { + return "MergePipelineClusters" +} + +func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string { + switch v.Kind() { + case dyn.KindNil: + // Note: the cluster label is optional and defaults to 'default'. + // We therefore ALSO merge all clusters without a label. 
+ return "default" + case dyn.KindString: + return strings.ToLower(v.MustString()) + default: + panic("task key must be a string") + } +} + +func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) error { + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + if v == dyn.NilValue { + return v, nil + } + + return dyn.Map(v, "resources.pipelines", dyn.Foreach(func(pipeline dyn.Value) (dyn.Value, error) { + return dyn.Map(pipeline, "clusters", merge.ElementsByKey("label", m.clusterLabel)) + })) + }) +} diff --git a/bundle/config/mutator/merge_pipeline_clusters_test.go b/bundle/config/mutator/merge_pipeline_clusters_test.go new file mode 100644 index 000000000..fb54a67d2 --- /dev/null +++ b/bundle/config/mutator/merge_pipeline_clusters_test.go @@ -0,0 +1,125 @@ +package mutator_test + +import ( + "context" + "strings" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" +) + +func TestMergePipelineClusters(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "foo": { + PipelineSpec: &pipelines.PipelineSpec{ + Clusters: []pipelines.PipelineCluster{ + { + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + PolicyId: "1234", + }, + { + Label: "maintenance", + NodeTypeId: "i3.2xlarge", + }, + { + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, err) + + p := b.Config.Resources.Pipelines["foo"] + + assert.Len(t, p.Clusters, 2) + assert.Equal(t, "default", p.Clusters[0].Label) + assert.Equal(t, "maintenance", p.Clusters[1].Label) + + // The default cluster was merged with a subsequent one. + pc0 := p.Clusters[0] + assert.Equal(t, "i3.2xlarge", pc0.NodeTypeId) + assert.Equal(t, 4, pc0.NumWorkers) + assert.Equal(t, "1234", pc0.PolicyId) + + // The maintenance cluster was left untouched. + pc1 := p.Clusters[1] + assert.Equal(t, "i3.2xlarge", pc1.NodeTypeId) +} + +func TestMergePipelineClustersCaseInsensitive(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "foo": { + PipelineSpec: &pipelines.PipelineSpec{ + Clusters: []pipelines.PipelineCluster{ + { + Label: "default", + NumWorkers: 2, + }, + { + Label: "DEFAULT", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, err) + + p := b.Config.Resources.Pipelines["foo"] + assert.Len(t, p.Clusters, 1) + + // The default cluster was merged with a subsequent one. 
+ pc0 := p.Clusters[0] + assert.Equal(t, "default", strings.ToLower(pc0.Label)) + assert.Equal(t, 4, pc0.NumWorkers) +} + +func TestMergePipelineClustersNilPipelines(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: nil, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, err) +} + +func TestMergePipelineClustersEmptyPipelines(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{}, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, err) +} diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index b6327e859..c45a6c15e 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -10,12 +10,16 @@ func DefaultMutators() []bundle.Mutator { return []bundle.Mutator{ scripts.Execute(config.ScriptPreInit), ProcessRootIncludes(), + EnvironmentsToTargets(), InitializeVariables(), DefineDefaultTarget(), LoadGitDetails(), } } -func DefaultMutatorsForTarget(env string) []bundle.Mutator { - return append(DefaultMutators(), SelectTarget(env)) +func DefaultMutatorsForTarget(target string) []bundle.Mutator { + return append( + DefaultMutators(), + SelectTarget(target), + ) } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 4c5d4427d..7cc500c60 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -28,7 +28,9 @@ func TestOverrideDevelopment(t *testing.T) { Name: "job1", Tasks: []jobs.Task{ { - NewCluster: &compute.ClusterSpec{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "14.2.x-scala2.12", + }, }, { ExistingClusterId: "cluster2", diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index f02d78865..6d8025803 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -274,12 +274,12 @@ func TestAllResourcesMocked(t *testing.T) { // Make sure that we at least rename all resources func TestAllResourcesRenamed(t *testing.T) { b := mockBundle(config.Development) - resources := reflect.ValueOf(b.Config.Resources) m := ProcessTargetMode() err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) + resources := reflect.ValueOf(b.Config.Resources) for i := 0; i < resources.NumField(); i++ { field := resources.Field(i) diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go new file mode 100644 index 000000000..a9ff70f68 --- /dev/null +++ b/bundle/config/mutator/resolve_variable_references.go @@ -0,0 +1,81 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/dynvar" +) + +type resolveVariableReferences struct { + prefixes []string +} + +func ResolveVariableReferences(prefixes ...string) bundle.Mutator { + return &resolveVariableReferences{prefixes: prefixes} +} + +func (*resolveVariableReferences) Name() string { + return "ResolveVariableReferences" +} + +func (m *resolveVariableReferences) Validate(ctx context.Context, b *bundle.Bundle) error { + return nil +} + +func (m *resolveVariableReferences) Apply(ctx 
context.Context, b *bundle.Bundle) error { + prefixes := make([]dyn.Path, len(m.prefixes)) + for i, prefix := range m.prefixes { + prefixes[i] = dyn.MustPathFromString(prefix) + } + + // The path ${var.foo} is a shorthand for ${variables.foo.value}. + // We rewrite it here to make the resolution logic simpler. + varPath := dyn.NewPath(dyn.Key("var")) + + return b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + // Synthesize a copy of the root that has all fields that are present in the type + // but not set in the dynamic value set to their corresponding empty value. + // This enables users to interpolate variable references to fields that haven't + // been explicitly set in the dynamic value. + // + // For example: ${bundle.git.origin_url} should resolve to an empty string + // if a bundle isn't located in a Git repository (yet). + // + // This is consistent with the behavior prior to using the dynamic value system. + // + // We can ignore the diagnostics return valuebecause we know that the dynamic value + // has already been normalized when it was first loaded from the configuration file. + // + normalized, _ := convert.Normalize(b.Config, root, convert.IncludeMissingFields) + lookup := func(path dyn.Path) (dyn.Value, error) { + // Future opportunity: if we lookup this path in both the given root + // and the synthesized root, we know if it was explicitly set or implied to be empty. + // Then we can emit a warning if it was not explicitly set. + return dyn.GetByPath(normalized, path) + } + + // Resolve variable references in all values. + return dynvar.Resolve(root, func(path dyn.Path) (dyn.Value, error) { + // Rewrite the shorthand path ${var.foo} into ${variables.foo.value}. + if path.HasPrefix(varPath) && len(path) == 2 { + path = dyn.NewPath( + dyn.Key("variables"), + path[1], + dyn.Key("value"), + ) + } + + // Perform resolution only if the path starts with one of the specified prefixes. + for _, prefix := range prefixes { + if path.HasPrefix(prefix) { + return lookup(path) + } + } + + return dyn.InvalidValue, dynvar.ErrSkipResolution + }) + }) +} diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go new file mode 100644 index 000000000..1f253d41c --- /dev/null +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -0,0 +1,97 @@ +package mutator + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestResolveVariableReferences(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Workspace: config.Workspace{ + RootPath: "${bundle.name}/bar", + FilePath: "${workspace.root_path}/baz", + }, + }, + } + + // Apply with an invalid prefix. This should not change the workspace root path. + err := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist")) + require.NoError(t, err) + require.Equal(t, "${bundle.name}/bar", b.Config.Workspace.RootPath) + require.Equal(t, "${workspace.root_path}/baz", b.Config.Workspace.FilePath) + + // Apply with a valid prefix. This should change the workspace root path. 
+ err = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace")) + require.NoError(t, err) + require.Equal(t, "example/bar", b.Config.Workspace.RootPath) + require.Equal(t, "example/bar/baz", b.Config.Workspace.FilePath) +} + +func TestResolveVariableReferencesToBundleVariables(t *testing.T) { + s := func(s string) *string { + return &s + } + + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Workspace: config.Workspace{ + RootPath: "${bundle.name}/${var.foo}", + }, + Variables: map[string]*variable.Variable{ + "foo": { + Value: s("bar"), + }, + }, + }, + } + + // Apply with a valid prefix. This should change the workspace root path. + err := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "variables")) + require.NoError(t, err) + require.Equal(t, "example/bar", b.Config.Workspace.RootPath) +} + +func TestResolveVariableReferencesToEmptyFields(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + Git: config.Git{ + Branch: "", + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tags: map[string]string{ + "git_branch": "${bundle.git.branch}", + }, + }, + }, + }, + }, + }, + } + + // Apply for the bundle prefix. + err := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle")) + require.NoError(t, err) + + // The job settings should have been interpolated to an empty string. + require.Equal(t, "", b.Config.Resources.Jobs["job1"].JobSettings.Tags["git_branch"]) +} diff --git a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go new file mode 100644 index 000000000..c1761690d --- /dev/null +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -0,0 +1,58 @@ +package mutator + +import ( + "context" + "path/filepath" + + "github.com/databricks/cli/bundle" + + "github.com/databricks/cli/libs/dyn" +) + +type rewriteSyncPaths struct{} + +func RewriteSyncPaths() bundle.Mutator { + return &rewriteSyncPaths{} +} + +func (m *rewriteSyncPaths) Name() string { + return "RewriteSyncPaths" +} + +// makeRelativeTo returns a dyn.MapFunc that joins the relative path +// of the file it was defined in w.r.t. the bundle root path, with +// the contents of the string node. +// +// For example: +// - The bundle root is /foo +// - The configuration file that defines the string node is at /foo/bar/baz.yml +// - The string node contains "somefile.*" +// +// Then the resulting value will be "bar/somefile.*". 
+func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { + return func(v dyn.Value) (dyn.Value, error) { + dir := filepath.Dir(v.Location().File) + rel, err := filepath.Rel(root, dir) + if err != nil { + return dyn.NilValue, err + } + + return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Location()), nil + } +} + +func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) error { + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "sync", func(v dyn.Value) (nv dyn.Value, err error) { + v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.Config.Path))) + if err != nil { + return dyn.NilValue, err + } + v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.Config.Path))) + if err != nil { + return dyn.NilValue, err + } + return v, nil + }) + }) +} diff --git a/bundle/config/mutator/rewrite_sync_paths_test.go b/bundle/config/mutator/rewrite_sync_paths_test.go new file mode 100644 index 000000000..576333e92 --- /dev/null +++ b/bundle/config/mutator/rewrite_sync_paths_test.go @@ -0,0 +1,103 @@ +package mutator_test + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/stretchr/testify/assert" +) + +func TestRewriteSyncPathsRelative(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Path: ".", + Sync: config.Sync{ + Include: []string{ + "foo", + "bar", + }, + Exclude: []string{ + "baz", + "qux", + }, + }, + }, + } + + bundletest.SetLocation(b, "sync.include[0]", "./file.yml") + bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml") + bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml") + bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml") + + err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, err) + + assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0]) + assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1]) + assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0]) + assert.Equal(t, filepath.Clean("a/b/c/qux"), b.Config.Sync.Exclude[1]) +} + +func TestRewriteSyncPathsAbsolute(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Path: "/tmp/dir", + Sync: config.Sync{ + Include: []string{ + "foo", + "bar", + }, + Exclude: []string{ + "baz", + "qux", + }, + }, + }, + } + + bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml") + bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml") + bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml") + bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml") + + err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, err) + + assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0]) + assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1]) + assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0]) + assert.Equal(t, filepath.Clean("a/b/c/qux"), b.Config.Sync.Exclude[1]) +} + +func TestRewriteSyncPathsErrorPaths(t *testing.T) { + t.Run("no sync block", func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Path: ".", + }, + } + + err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, err) + }) + + t.Run("empty include/exclude blocks", func(t *testing.T) { + b := &bundle.Bundle{ + 
Config: config.Root{ + Path: ".", + Sync: config.Sync{ + Include: []string{}, + Exclude: []string{}, + }, + }, + } + + err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, err) + }) +} diff --git a/bundle/config/mutator/select_target.go b/bundle/config/mutator/select_target.go index 2ad431128..95558f030 100644 --- a/bundle/config/mutator/select_target.go +++ b/bundle/config/mutator/select_target.go @@ -30,13 +30,13 @@ func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) error { } // Get specified target - target, ok := b.Config.Targets[m.name] + _, ok := b.Config.Targets[m.name] if !ok { return fmt.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", ")) } // Merge specified target into root configuration structure. - err := b.Config.MergeTargetOverrides(target) + err := b.Config.MergeTargetOverrides(m.name) if err != nil { return err } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 67f15d407..96ff88f3f 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -9,8 +9,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -44,10 +44,6 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, JobSettings: &jobs.JobSettings{ GitSource: &jobs.GitSource{ GitBranch: "somebranch", @@ -80,6 +76,8 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) require.NoError(t, err) @@ -116,9 +114,6 @@ func TestTranslatePaths(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -171,9 +166,6 @@ func TestTranslatePaths(t *testing.T) { }, Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -207,6 +199,8 @@ func TestTranslatePaths(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) require.NoError(t, err) @@ -287,9 +281,6 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "job/resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -323,10 +314,6 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { }, Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "pipeline/resource.yml"), - }, - 
PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -342,6 +329,9 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { }, } + bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml")) + bundletest.SetLocation(b, "resources.pipelines", filepath.Join(dir, "pipeline/resource.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) require.NoError(t, err) @@ -385,9 +375,6 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "../resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -403,6 +390,8 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, "is not contained in bundle root") } @@ -416,9 +405,6 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "fake.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -434,6 +420,8 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") } @@ -447,9 +435,6 @@ func TestJobFileDoesNotExistError(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "fake.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -465,6 +450,8 @@ func TestJobFileDoesNotExistError(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.EqualError(t, err, "file ./doesnt_exist.py not found") } @@ -478,9 +465,6 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "fake.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -496,6 +480,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") } @@ -509,9 +495,6 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "fake.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -527,6 +510,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.EqualError(t, err, "file ./doesnt_exist.py not found") } @@ -544,9 +529,6 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: 
filepath.Join(dir, "resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -562,6 +544,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, `expected a file for "tasks.spark_python_task.python_file" but got a notebook`) } @@ -579,9 +563,6 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -597,6 +578,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, `expected a notebook for "tasks.notebook_task.notebook_path" but got a file`) } @@ -614,9 +597,6 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -632,6 +612,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, `expected a notebook for "libraries.notebook.path" but got a file`) } @@ -649,9 +631,6 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -667,6 +646,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, err, `expected a file for "libraries.file.path" but got a notebook`) } diff --git a/bundle/config/paths/paths.go b/bundle/config/paths/paths.go index 2c9ecb8c0..68c32a48c 100644 --- a/bundle/config/paths/paths.go +++ b/bundle/config/paths/paths.go @@ -3,12 +3,25 @@ package paths import ( "fmt" "path/filepath" + + "github.com/databricks/cli/libs/dyn" ) type Paths struct { // Absolute path on the local file system to the configuration file that holds // the definition of this resource. ConfigFilePath string `json:"-" bundle:"readonly"` + + // DynamicValue stores the [dyn.Value] of the containing struct. + // This assumes that this struct is always embedded. 
+ DynamicValue dyn.Value `json:"-"` +} + +func (p *Paths) ConfigureConfigFilePath() { + if !p.DynamicValue.IsValid() { + panic("DynamicValue not set") + } + p.ConfigFilePath = p.DynamicValue.Location().File } func (p *Paths) ConfigFileDirectory() (string, error) { diff --git a/bundle/config/resources.go b/bundle/config/resources.go index d0b64d1a3..457360a0c 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -126,51 +126,30 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker, return tracker, nil } -// SetConfigFilePath sets the specified path for all resources contained in this instance. +// ConfigureConfigFilePath sets the specified path for all resources contained in this instance. // This property is used to correctly resolve paths relative to the path // of the configuration file they were defined in. -func (r *Resources) SetConfigFilePath(path string) { +func (r *Resources) ConfigureConfigFilePath() { for _, e := range r.Jobs { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.Pipelines { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.Models { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.Experiments { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.ModelServingEndpoints { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.RegisteredModels { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } } -// Merge iterates over all resources and merges chunks of the -// resource configuration that can be merged. For example, for -// jobs, this merges job cluster definitions and tasks that -// use the same `job_cluster_key`, or `task_key`, respectively. -func (r *Resources) Merge() error { - for _, job := range r.Jobs { - if err := job.MergeJobClusters(); err != nil { - return err - } - if err := job.MergeTasks(); err != nil { - return err - } - } - for _, pipeline := range r.Pipelines { - if err := pipeline.MergeClusters(); err != nil { - return err - } - } - return nil -} - type ConfigResource interface { Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) TerraformResourceName() string diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index da85f94dc..45e9662d9 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/imdario/mergo" ) type Job struct { @@ -30,72 +29,6 @@ func (s Job) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// MergeJobClusters merges job clusters with the same key. -// The job clusters field is a slice, and as such, overrides are appended to it. -// We can identify a job cluster by its key, however, so we can use this key -// to figure out which definitions are actually overrides and merge them. -func (j *Job) MergeJobClusters() error { - keys := make(map[string]*jobs.JobCluster) - output := make([]jobs.JobCluster, 0, len(j.JobClusters)) - - // Target overrides are always appended, so we can iterate in natural order to - // first find the base definition, and merge instances we encounter later. - for i := range j.JobClusters { - key := j.JobClusters[i].JobClusterKey - - // Register job cluster with key if not yet seen before. 
- ref, ok := keys[key] - if !ok { - output = append(output, j.JobClusters[i]) - keys[key] = &output[len(output)-1] - continue - } - - // Merge this instance into the reference. - err := mergo.Merge(ref, &j.JobClusters[i], mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } - } - - // Overwrite resulting slice. - j.JobClusters = output - return nil -} - -// MergeTasks merges tasks with the same key. -// The tasks field is a slice, and as such, overrides are appended to it. -// We can identify a task by its task key, however, so we can use this key -// to figure out which definitions are actually overrides and merge them. -func (j *Job) MergeTasks() error { - keys := make(map[string]*jobs.Task) - tasks := make([]jobs.Task, 0, len(j.Tasks)) - - // Target overrides are always appended, so we can iterate in natural order to - // first find the base definition, and merge instances we encounter later. - for i := range j.Tasks { - key := j.Tasks[i].TaskKey - - // Register the task with key if not yet seen before. - ref, ok := keys[key] - if !ok { - tasks = append(tasks, j.Tasks[i]) - keys[key] = &tasks[len(tasks)-1] - continue - } - - // Merge this instance into the reference. - err := mergo.Merge(ref, &j.Tasks[i], mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } - } - - // Overwrite resulting slice. - j.Tasks = tasks - return nil -} - func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { jobId, err := strconv.Atoi(id) if err != nil { diff --git a/bundle/config/resources/job_test.go b/bundle/config/resources/job_test.go deleted file mode 100644 index 24b82fabb..000000000 --- a/bundle/config/resources/job_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package resources - -import ( - "testing" - - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestJobMergeJobClusters(t *testing.T) { - j := &Job{ - JobSettings: &jobs.JobSettings{ - JobClusters: []jobs.JobCluster{ - { - JobClusterKey: "foo", - NewCluster: &compute.ClusterSpec{ - SparkVersion: "13.3.x-scala2.12", - NodeTypeId: "i3.xlarge", - NumWorkers: 2, - }, - }, - { - JobClusterKey: "bar", - NewCluster: &compute.ClusterSpec{ - SparkVersion: "10.4.x-scala2.12", - }, - }, - { - JobClusterKey: "foo", - NewCluster: &compute.ClusterSpec{ - NodeTypeId: "i3.2xlarge", - NumWorkers: 4, - }, - }, - }, - }, - } - - err := j.MergeJobClusters() - require.NoError(t, err) - - assert.Len(t, j.JobClusters, 2) - assert.Equal(t, "foo", j.JobClusters[0].JobClusterKey) - assert.Equal(t, "bar", j.JobClusters[1].JobClusterKey) - - // This job cluster was merged with a subsequent one. - jc0 := j.JobClusters[0].NewCluster - assert.Equal(t, "13.3.x-scala2.12", jc0.SparkVersion) - assert.Equal(t, "i3.2xlarge", jc0.NodeTypeId) - assert.Equal(t, 4, jc0.NumWorkers) - - // This job cluster was left untouched. 
- jc1 := j.JobClusters[1].NewCluster - assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion) -} - -func TestJobMergeTasks(t *testing.T) { - j := &Job{ - JobSettings: &jobs.JobSettings{ - Tasks: []jobs.Task{ - { - TaskKey: "foo", - NewCluster: &compute.ClusterSpec{ - SparkVersion: "13.3.x-scala2.12", - NodeTypeId: "i3.xlarge", - NumWorkers: 2, - }, - Libraries: []compute.Library{ - {Whl: "package1"}, - }, - }, - { - TaskKey: "bar", - NewCluster: &compute.ClusterSpec{ - SparkVersion: "10.4.x-scala2.12", - }, - }, - { - TaskKey: "foo", - NewCluster: &compute.ClusterSpec{ - NodeTypeId: "i3.2xlarge", - NumWorkers: 4, - }, - Libraries: []compute.Library{ - {Pypi: &compute.PythonPyPiLibrary{ - Package: "package2", - }}, - }, - }, - }, - }, - } - - err := j.MergeTasks() - require.NoError(t, err) - - assert.Len(t, j.Tasks, 2) - assert.Equal(t, "foo", j.Tasks[0].TaskKey) - assert.Equal(t, "bar", j.Tasks[1].TaskKey) - - // This task was merged with a subsequent one. - task0 := j.Tasks[0] - cluster := task0.NewCluster - assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion) - assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId) - assert.Equal(t, 4, cluster.NumWorkers) - assert.Len(t, task0.Libraries, 2) - assert.Equal(t, task0.Libraries[0].Whl, "package1") - assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2") - - // This task was left untouched. - task1 := j.Tasks[1].NewCluster - assert.Equal(t, "10.4.x-scala2.12", task1.SparkVersion) -} diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index 97aeef156..2f9ff8d0d 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -2,14 +2,12 @@ package resources import ( "context" - "strings" "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/pipelines" - "github.com/imdario/mergo" ) type Pipeline struct { @@ -30,53 +28,6 @@ func (s Pipeline) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// MergeClusters merges cluster definitions with same label. -// The clusters field is a slice, and as such, overrides are appended to it. -// We can identify a cluster by its label, however, so we can use this label -// to figure out which definitions are actually overrides and merge them. -// -// Note: the cluster label is optional and defaults to 'default'. -// We therefore ALSO merge all clusters without a label. -func (p *Pipeline) MergeClusters() error { - clusters := make(map[string]*pipelines.PipelineCluster) - output := make([]pipelines.PipelineCluster, 0, len(p.Clusters)) - - // Normalize cluster labels. - // If empty, this defaults to "default". - // To make matching case insensitive, labels are lowercased. - for i := range p.Clusters { - label := p.Clusters[i].Label - if label == "" { - label = "default" - } - p.Clusters[i].Label = strings.ToLower(label) - } - - // Target overrides are always appended, so we can iterate in natural order to - // first find the base definition, and merge instances we encounter later. - for i := range p.Clusters { - label := p.Clusters[i].Label - - // Register pipeline cluster with label if not yet seen before. - ref, ok := clusters[label] - if !ok { - output = append(output, p.Clusters[i]) - clusters[label] = &output[len(output)-1] - continue - } - - // Merge this instance into the reference. 
- err := mergo.Merge(ref, &p.Clusters[i], mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } - } - - // Overwrite resulting slice. - p.Clusters = output - return nil -} - func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { _, err := w.Pipelines.Get(ctx, pipelines.GetPipelineRequest{ PipelineId: id, diff --git a/bundle/config/resources/pipeline_test.go b/bundle/config/resources/pipeline_test.go deleted file mode 100644 index 316e3d145..000000000 --- a/bundle/config/resources/pipeline_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package resources - -import ( - "strings" - "testing" - - "github.com/databricks/databricks-sdk-go/service/pipelines" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestPipelineMergeClusters(t *testing.T) { - p := &Pipeline{ - PipelineSpec: &pipelines.PipelineSpec{ - Clusters: []pipelines.PipelineCluster{ - { - NodeTypeId: "i3.xlarge", - NumWorkers: 2, - PolicyId: "1234", - }, - { - Label: "maintenance", - NodeTypeId: "i3.2xlarge", - }, - { - NodeTypeId: "i3.2xlarge", - NumWorkers: 4, - }, - }, - }, - } - - err := p.MergeClusters() - require.NoError(t, err) - - assert.Len(t, p.Clusters, 2) - assert.Equal(t, "default", p.Clusters[0].Label) - assert.Equal(t, "maintenance", p.Clusters[1].Label) - - // The default cluster was merged with a subsequent one. - pc0 := p.Clusters[0] - assert.Equal(t, "i3.2xlarge", pc0.NodeTypeId) - assert.Equal(t, 4, pc0.NumWorkers) - assert.Equal(t, "1234", pc0.PolicyId) - - // The maintenance cluster was left untouched. - pc1 := p.Clusters[1] - assert.Equal(t, "i3.2xlarge", pc1.NodeTypeId) -} - -func TestPipelineMergeClustersCaseInsensitive(t *testing.T) { - p := &Pipeline{ - PipelineSpec: &pipelines.PipelineSpec{ - Clusters: []pipelines.PipelineCluster{ - { - Label: "default", - NumWorkers: 2, - }, - { - Label: "DEFAULT", - NumWorkers: 4, - }, - }, - }, - } - - err := p.MergeClusters() - require.NoError(t, err) - - assert.Len(t, p.Clusters, 1) - - // The default cluster was merged with a subsequent one. - pc0 := p.Clusters[0] - assert.Equal(t, "default", strings.ToLower(pc0.Label)) - assert.Equal(t, 4, pc0.NumWorkers) -} diff --git a/bundle/config/root.go b/bundle/config/root.go index 94cc0b177..c8b6c5999 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -1,6 +1,8 @@ package config import ( + "bytes" + "context" "fmt" "os" "path/filepath" @@ -8,12 +10,20 @@ import ( "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/ghodss/yaml" - "github.com/imdario/mergo" ) type Root struct { + value dyn.Value + diags diag.Diagnostics + depth int + // Path contains the directory path to the root of the bundle. // It is set when loading `databricks.yml`. Path string `json:"-" bundle:"readonly"` @@ -70,48 +80,169 @@ func Load(path string) (*Root, error) { return nil, err } - var r Root - err = yaml.Unmarshal(raw, &r) + r := Root{ + Path: filepath.Dir(path), + } + + // Load configuration tree from YAML. 
+ v, err := yamlloader.LoadYAML(path, bytes.NewBuffer(raw)) if err != nil { return nil, fmt.Errorf("failed to load %s: %w", path, err) } - if r.Environments != nil && r.Targets != nil { - return nil, fmt.Errorf("both 'environments' and 'targets' are specified, only 'targets' should be used: %s", path) + // Rewrite configuration tree where necessary. + v, err = rewriteShorthands(v) + if err != nil { + return nil, fmt.Errorf("failed to rewrite %s: %w", path, err) } - if r.Environments != nil { - //TODO: add a command line notice that this is a deprecated option. - r.Targets = r.Environments - } + // Normalize dynamic configuration tree according to configuration type. + v, diags := convert.Normalize(r, v) - r.Path = filepath.Dir(path) - r.SetConfigFilePath(path) + // Keep track of diagnostics (warnings and errors in the schema). + // We delay acting on diagnostics until we have loaded all + // configuration files and merged them together. + r.diags = diags + + // Convert normalized configuration tree to typed configuration. + err = r.updateWithDynamicValue(v) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", path, err) + } _, err = r.Resources.VerifyUniqueResourceIdentifiers() return &r, err } -// SetConfigFilePath configures the path that its configuration -// was loaded from in configuration leafs that require it. -func (r *Root) SetConfigFilePath(path string) { - r.Resources.SetConfigFilePath(path) - if r.Artifacts != nil { - r.Artifacts.SetConfigFilePath(path) +func (r *Root) initializeDynamicValue() error { + // Many test cases initialize a config as a Go struct literal. + // The value will be invalid and we need to populate it from the typed configuration. + if r.value.IsValid() { + return nil } - if r.Targets != nil { - for _, env := range r.Targets { - if env == nil { - continue - } - if env.Resources != nil { - env.Resources.SetConfigFilePath(path) - } - if env.Artifacts != nil { - env.Artifacts.SetConfigFilePath(path) - } + nv, err := convert.FromTyped(r, dyn.NilValue) + if err != nil { + return err + } + + r.value = nv + return nil +} + +func (r *Root) updateWithDynamicValue(nv dyn.Value) error { + // Hack: restore state; it may be cleared by [ToTyped] if + // the configuration equals nil (happens in tests). + diags := r.diags + depth := r.depth + path := r.Path + + defer func() { + r.diags = diags + r.depth = depth + r.Path = path + }() + + // Convert normalized configuration tree to typed configuration. + err := convert.ToTyped(r, nv) + if err != nil { + return err + } + + // Assign the normalized configuration tree. + r.value = nv + + // Assign config file paths after converting to typed configuration. + r.ConfigureConfigFilePath() + return nil +} + +func (r *Root) Mutate(fn func(dyn.Value) (dyn.Value, error)) error { + err := r.initializeDynamicValue() + if err != nil { + return err + } + nv, err := fn(r.value) + if err != nil { + return err + } + err = r.updateWithDynamicValue(nv) + if err != nil { + return err + } + return nil +} + +func (r *Root) MarkMutatorEntry(ctx context.Context) error { + err := r.initializeDynamicValue() + if err != nil { + return err + } + + r.depth++ + + // If we are entering a mutator at depth 1, we need to convert + // the dynamic configuration tree to typed configuration. + if r.depth == 1 { + // Always run ToTyped upon entering a mutator. + // Convert normalized configuration tree to typed configuration. 
+ err := r.updateWithDynamicValue(r.value) + if err != nil { + log.Warnf(ctx, "unable to convert dynamic configuration to typed configuration: %v", err) + return err } + + } else { + nv, err := convert.FromTyped(r, r.value) + if err != nil { + log.Warnf(ctx, "unable to convert typed configuration to dynamic configuration: %v", err) + return err + } + + // Re-run ToTyped to ensure that no state is piggybacked + err = r.updateWithDynamicValue(nv) + if err != nil { + log.Warnf(ctx, "unable to convert dynamic configuration to typed configuration: %v", err) + return err + } + } + + return nil +} + +func (r *Root) MarkMutatorExit(ctx context.Context) error { + r.depth-- + + // If we are exiting a mutator at depth 0, we need to convert + // the typed configuration to a dynamic configuration tree. + if r.depth == 0 { + nv, err := convert.FromTyped(r, r.value) + if err != nil { + log.Warnf(ctx, "unable to convert typed configuration to dynamic configuration: %v", err) + return err + } + + // Re-run ToTyped to ensure that no state is piggybacked + err = r.updateWithDynamicValue(nv) + if err != nil { + log.Warnf(ctx, "unable to convert dynamic configuration to typed configuration: %v", err) + return err + } + } + + return nil +} + +func (r *Root) Diagnostics() diag.Diagnostics { + return r.diags +} + +// SetConfigFilePath configures the path that its configuration +// was loaded from in configuration leafs that require it. +func (r *Root) ConfigureConfigFilePath() { + r.Resources.ConfigureConfigFilePath() + if r.Artifacts != nil { + r.Artifacts.ConfigureConfigFilePath() } } @@ -139,125 +270,188 @@ func (r *Root) InitializeVariables(vars []string) error { } func (r *Root) Merge(other *Root) error { - err := r.Sync.Merge(r, other) - if err != nil { - return err - } - other.Sync = Sync{} - - // TODO: when hooking into merge semantics, disallow setting path on the target instance. - other.Path = "" + // Merge diagnostics. + r.diags = append(r.diags, other.diags...) // Check for safe merge, protecting against duplicate resource identifiers - err = r.Resources.VerifySafeMerge(&other.Resources) + err := r.Resources.VerifySafeMerge(&other.Resources) if err != nil { return err } - // TODO: define and test semantics for merging. - return mergo.Merge(r, other, mergo.WithOverride) + // Merge dynamic configuration values. + return r.Mutate(func(root dyn.Value) (dyn.Value, error) { + return merge.Merge(root, other.value) + }) } -func (r *Root) MergeTargetOverrides(target *Target) error { +func mergeField(rv, ov dyn.Value, name string) (dyn.Value, error) { + path := dyn.NewPath(dyn.Key(name)) + reference, _ := dyn.GetByPath(rv, path) + override, _ := dyn.GetByPath(ov, path) + + // Merge the override into the reference. + var out dyn.Value var err error - - // Target may be nil if it's empty. - if target == nil { - return nil + if reference.IsValid() && override.IsValid() { + out, err = merge.Merge(reference, override) + if err != nil { + return dyn.InvalidValue, err + } + } else if reference.IsValid() { + out = reference + } else if override.IsValid() { + out = override + } else { + return rv, nil } - if target.Bundle != nil { - err = mergo.Merge(&r.Bundle, target.Bundle, mergo.WithOverride) + return dyn.SetByPath(rv, path, out) +} + +func (r *Root) MergeTargetOverrides(name string) error { + root := r.value + target, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("targets"), dyn.Key(name))) + if err != nil { + return err + } + + // Confirm validity of variable overrides. 
+ err = validateVariableOverrides(root, target) + if err != nil { + return err + } + + // Merge fields that can be merged 1:1. + for _, f := range []string{ + "bundle", + "workspace", + "artifacts", + "resources", + "sync", + "permissions", + "variables", + } { + if root, err = mergeField(root, target, f); err != nil { + return err + } + } + + // Merge `run_as`. This field must be overwritten if set, not merged. + if v := target.Get("run_as"); v != dyn.NilValue { + root, err = dyn.Set(root, "run_as", v) if err != nil { return err } } - if target.Workspace != nil { - err = mergo.Merge(&r.Workspace, target.Workspace, mergo.WithOverride) + // Below, we're setting fields on the bundle key, so make sure it exists. + if root.Get("bundle") == dyn.NilValue { + root, err = dyn.Set(root, "bundle", dyn.NewValue(map[string]dyn.Value{}, dyn.Location{})) if err != nil { return err } } - if target.Artifacts != nil { - err = mergo.Merge(&r.Artifacts, target.Artifacts, mergo.WithOverride, mergo.WithAppendSlice) + // Merge `mode`. This field must be overwritten if set, not merged. + if v := target.Get("mode"); v != dyn.NilValue { + root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("mode")), v) if err != nil { return err } } - if target.Resources != nil { - err = mergo.Merge(&r.Resources, target.Resources, mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } - - err = r.Resources.Merge() + // Merge `compute_id`. This field must be overwritten if set, not merged. + if v := target.Get("compute_id"); v != dyn.NilValue { + root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v) if err != nil { return err } } - if target.Variables != nil { - for k, v := range target.Variables { - rootVariable, ok := r.Variables[k] - if !ok { - return fmt.Errorf("variable %s is not defined but is assigned a value", k) - } + // Merge `git`. + if v := target.Get("git"); v != dyn.NilValue { + ref, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git"))) + if err != nil { + ref = dyn.NewValue(map[string]dyn.Value{}, dyn.Location{}) + } - if sv, ok := v.(string); ok { - // we allow overrides of the default value for a variable - defaultVal := sv - rootVariable.Default = &defaultVal - } else if vv, ok := v.(map[string]any); ok { - // we also allow overrides of the lookup value for a variable - lookup, ok := vv["lookup"] - if !ok { - return fmt.Errorf("variable %s is incorrectly defined lookup override, no 'lookup' key defined", k) - } - rootVariable.Lookup = variable.LookupFromMap(lookup.(map[string]any)) - } else { - return fmt.Errorf("variable %s is incorrectly defined in target override", k) + // Merge the override into the reference. + out, err := merge.Merge(ref, v) + if err != nil { + return err + } + + // If the branch was overridden, we need to clear the inferred flag. 
+ if branch := v.Get("branch"); branch != dyn.NilValue { + out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.NewValue(false, dyn.Location{})) + if err != nil { + return err } } - } - if target.RunAs != nil { - r.RunAs = target.RunAs - } - - if target.Mode != "" { - r.Bundle.Mode = target.Mode - } - - if target.ComputeID != "" { - r.Bundle.ComputeID = target.ComputeID - } - - git := &r.Bundle.Git - if target.Git.Branch != "" { - git.Branch = target.Git.Branch - git.Inferred = false - } - if target.Git.Commit != "" { - git.Commit = target.Git.Commit - } - if target.Git.OriginURL != "" { - git.OriginURL = target.Git.OriginURL - } - - if target.Sync != nil { - err = mergo.Merge(&r.Sync, target.Sync, mergo.WithAppendSlice) + // Set the merged value. + root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git")), out) if err != nil { return err } } - if target.Permissions != nil { - err = mergo.Merge(&r.Permissions, target.Permissions, mergo.WithAppendSlice) - if err != nil { - return err + // Convert normalized configuration tree to typed configuration. + return r.updateWithDynamicValue(root) +} + +// rewriteShorthands performs lightweight rewriting of the configuration +// tree where we allow users to write a shorthand and must rewrite to the full form. +func rewriteShorthands(v dyn.Value) (dyn.Value, error) { + if v.Kind() != dyn.KindMap { + return v, nil + } + + // For each target, rewrite the variables block. + return dyn.Map(v, "targets", dyn.Foreach(func(target dyn.Value) (dyn.Value, error) { + // Confirm it has a variables block. + if target.Get("variables") == dyn.NilValue { + return target, nil + } + + // For each variable, normalize its contents if it is a single string. + return dyn.Map(target, "variables", dyn.Foreach(func(variable dyn.Value) (dyn.Value, error) { + if variable.Kind() != dyn.KindString { + return variable, nil + } + + // Rewrite the variable to a map with a single key called "default". + // This conforms to the variable type. + return dyn.NewValue(map[string]dyn.Value{ + "default": variable, + }, variable.Location()), nil + })) + })) +} + +// validateVariableOverrides checks that all variables specified +// in the target override are also defined in the root. +func validateVariableOverrides(root, target dyn.Value) (err error) { + var rv map[string]variable.Variable + var tv map[string]variable.Variable + + // Collect variables from the root. + err = convert.ToTyped(&rv, root.Get("variables")) + if err != nil { + return fmt.Errorf("unable to collect variables from root: %w", err) + } + + // Collect variables from the target. + err = convert.ToTyped(&tv, target.Get("variables")) + if err != nil { + return fmt.Errorf("unable to collect variables from target: %w", err) + } + + // Check that all variables in the target exist in the root. 
+ for k := range tv { + if _, ok := rv[k]; !ok { + return fmt.Errorf("variable %s is not defined but is assigned a value", k) } } diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index 3f37da07a..3b25fb1f8 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -30,51 +30,6 @@ func TestRootLoad(t *testing.T) { assert.Equal(t, "basic", root.Bundle.Name) } -func TestRootMergeStruct(t *testing.T) { - root := &Root{ - Path: "path", - Workspace: Workspace{ - Host: "foo", - Profile: "profile", - }, - } - other := &Root{ - Path: "path", - Workspace: Workspace{ - Host: "bar", - }, - } - assert.NoError(t, root.Merge(other)) - assert.Equal(t, "bar", root.Workspace.Host) - assert.Equal(t, "profile", root.Workspace.Profile) -} - -func TestRootMergeMap(t *testing.T) { - root := &Root{ - Path: "path", - Targets: map[string]*Target{ - "development": { - Workspace: &Workspace{ - Host: "foo", - Profile: "profile", - }, - }, - }, - } - other := &Root{ - Path: "path", - Targets: map[string]*Target{ - "development": { - Workspace: &Workspace{ - Host: "bar", - }, - }, - }, - } - assert.NoError(t, root.Merge(other)) - assert.Equal(t, &Workspace{Host: "bar", Profile: "profile"}, root.Targets["development"].Workspace) -} - func TestDuplicateIdOnLoadReturnsError(t *testing.T) { _, err := Load("./testdata/duplicate_resource_names_in_root/databricks.yml") assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)") @@ -154,8 +109,13 @@ func TestInitializeVariablesUndefinedVariables(t *testing.T) { func TestRootMergeTargetOverridesWithMode(t *testing.T) { root := &Root{ Bundle: Bundle{}, + Targets: map[string]*Target{ + "development": { + Mode: Development, + }, + }, } - env := &Target{Mode: Development} - require.NoError(t, root.MergeTargetOverrides(env)) + root.initializeDynamicValue() + require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, Development, root.Bundle.Mode) } diff --git a/bundle/config/sync.go b/bundle/config/sync.go index 6ba2603c4..0580e4c4f 100644 --- a/bundle/config/sync.go +++ b/bundle/config/sync.go @@ -1,7 +1,5 @@ package config -import "path/filepath" - type Sync struct { // Include contains a list of globs evaluated relative to the bundle root path // to explicitly include files that were excluded by the user's gitignore. @@ -13,19 +11,3 @@ type Sync struct { // 2) the `Include` field above. 
Exclude []string `json:"exclude,omitempty"` } - -func (s *Sync) Merge(root *Root, other *Root) error { - path, err := filepath.Rel(root.Path, other.Path) - if err != nil { - return err - } - for _, include := range other.Sync.Include { - s.Include = append(s.Include, filepath.Join(path, include)) - } - - for _, exclude := range other.Sync.Exclude { - s.Exclude = append(s.Exclude, filepath.Join(path, exclude)) - } - - return nil -} diff --git a/bundle/config/target.go b/bundle/config/target.go index 158f25606..acc493574 100644 --- a/bundle/config/target.go +++ b/bundle/config/target.go @@ -2,6 +2,7 @@ package config import ( "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -33,7 +34,7 @@ type Target struct { // Override default values or lookup name for defined variables // Does not permit defining new variables or redefining existing ones // in the scope of an target - Variables map[string]any `json:"variables,omitempty"` + Variables map[string]*variable.Variable `json:"variables,omitempty"` Git Git `json:"git,omitempty"` diff --git a/bundle/config/variable/variable.go b/bundle/config/variable/variable.go index 9057f1cb9..5e700a9b0 100644 --- a/bundle/config/variable/variable.go +++ b/bundle/config/variable/variable.go @@ -4,8 +4,6 @@ import ( "fmt" ) -const VariableReferencePrefix = "var" - // An input variable for the bundle config type Variable struct { // A default value which then makes the variable optional diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go index a1a97aab3..e717ebd53 100644 --- a/bundle/deploy/metadata/compute_test.go +++ b/bundle/deploy/metadata/compute_test.go @@ -6,8 +6,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" "github.com/databricks/cli/bundle/metadata" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" @@ -36,18 +36,12 @@ func TestComputeMetadataMutator(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my-job-1": { - Paths: paths.Paths{ - ConfigFilePath: "a/b/c", - }, ID: "1111", JobSettings: &jobs.JobSettings{ Name: "My Job One", }, }, "my-job-2": { - Paths: paths.Paths{ - ConfigFilePath: "d/e/f", - }, ID: "2222", JobSettings: &jobs.JobSettings{ Name: "My Job Two", @@ -55,16 +49,16 @@ func TestComputeMetadataMutator(t *testing.T) { }, }, Pipelines: map[string]*resources.Pipeline{ - "my-pipeline": { - Paths: paths.Paths{ - ConfigFilePath: "abc", - }, - }, + "my-pipeline": {}, }, }, }, } + bundletest.SetLocation(b, "resources.jobs.my-job-1", "a/b/c") + bundletest.SetLocation(b, "resources.jobs.my-job-2", "d/e/f") + bundletest.SetLocation(b, "resources.pipelines.my-pipeline", "abc") + expectedMetadata := metadata.Metadata{ Version: metadata.Version, Config: metadata.Config{ diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index 4f00c27eb..525a38fa8 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -1,44 +1,64 @@ package terraform import ( + "context" "fmt" - "strings" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/interpolation" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" ) 
-// Rewrite variable references to resources into Terraform compatible format. -func interpolateTerraformResourceIdentifiers(path string, lookup map[string]string) (string, error) { - parts := strings.Split(path, interpolation.Delimiter) - if parts[0] == "resources" { - switch parts[1] { - case "pipelines": - path = strings.Join(append([]string{"databricks_pipeline"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "jobs": - path = strings.Join(append([]string{"databricks_job"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "models": - path = strings.Join(append([]string{"databricks_mlflow_model"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "experiments": - path = strings.Join(append([]string{"databricks_mlflow_experiment"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "model_serving_endpoints": - path = strings.Join(append([]string{"databricks_model_serving"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "registered_models": - path = strings.Join(append([]string{"databricks_registered_model"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - default: - panic("TODO: " + parts[1]) - } - } - - return interpolation.DefaultLookup(path, lookup) +type interpolateMutator struct { } func Interpolate() bundle.Mutator { - return interpolation.Interpolate(interpolateTerraformResourceIdentifiers) + return &interpolateMutator{} +} + +func (m *interpolateMutator) Name() string { + return "terraform.Interpolate" +} + +func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) error { + return b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + prefix := dyn.MustPathFromString("resources") + + // Resolve variable references in all values. + return dynvar.Resolve(root, func(path dyn.Path) (dyn.Value, error) { + // Expect paths of the form: + // - resources...... + if !path.HasPrefix(prefix) || len(path) < 4 { + return dyn.InvalidValue, dynvar.ErrSkipResolution + } + + // Rewrite the bundle configuration path: + // + // ${resources.pipelines.my_pipeline.id} + // + // into the Terraform-compatible resource identifier: + // + // ${databricks_pipeline.my_pipeline.id} + // + switch path[1] { + case dyn.Key("pipelines"): + path = dyn.NewPath(dyn.Key("databricks_pipeline")).Append(path[2:]...) + case dyn.Key("jobs"): + path = dyn.NewPath(dyn.Key("databricks_job")).Append(path[2:]...) + case dyn.Key("models"): + path = dyn.NewPath(dyn.Key("databricks_mlflow_model")).Append(path[2:]...) + case dyn.Key("experiments"): + path = dyn.NewPath(dyn.Key("databricks_mlflow_experiment")).Append(path[2:]...) + case dyn.Key("model_serving_endpoints"): + path = dyn.NewPath(dyn.Key("databricks_model_serving")).Append(path[2:]...) + case dyn.Key("registered_models"): + path = dyn.NewPath(dyn.Key("databricks_registered_model")).Append(path[2:]...) + default: + // Trigger "key not found" for unknown resource types. 
+ return dyn.GetByPath(root, path) + } + + return dyn.V(fmt.Sprintf("${%s}", path.String())), nil + }) + }) } diff --git a/bundle/deploy/terraform/interpolate_test.go b/bundle/deploy/terraform/interpolate_test.go new file mode 100644 index 000000000..be905ad77 --- /dev/null +++ b/bundle/deploy/terraform/interpolate_test.go @@ -0,0 +1,92 @@ +package terraform + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInterpolate(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my_job": { + JobSettings: &jobs.JobSettings{ + Tags: map[string]string{ + "other_pipeline": "${resources.pipelines.other_pipeline.id}", + "other_job": "${resources.jobs.other_job.id}", + "other_model": "${resources.models.other_model.id}", + "other_experiment": "${resources.experiments.other_experiment.id}", + "other_model_serving": "${resources.model_serving_endpoints.other_model_serving.id}", + "other_registered_model": "${resources.registered_models.other_registered_model.id}", + }, + Tasks: []jobs.Task{ + { + TaskKey: "my_task", + NotebookTask: &jobs.NotebookTask{ + BaseParameters: map[string]string{ + "model_name": "${resources.models.my_model.name}", + }, + }, + }, + }, + }, + }, + }, + Models: map[string]*resources.MlflowModel{ + "my_model": { + Model: &ml.Model{ + Name: "my_model", + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, Interpolate()) + require.NoError(t, err) + + j := b.Config.Resources.Jobs["my_job"] + assert.Equal(t, "${databricks_pipeline.other_pipeline.id}", j.Tags["other_pipeline"]) + assert.Equal(t, "${databricks_job.other_job.id}", j.Tags["other_job"]) + assert.Equal(t, "${databricks_mlflow_model.other_model.id}", j.Tags["other_model"]) + assert.Equal(t, "${databricks_mlflow_experiment.other_experiment.id}", j.Tags["other_experiment"]) + assert.Equal(t, "${databricks_model_serving.other_model_serving.id}", j.Tags["other_model_serving"]) + assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"]) + + m := b.Config.Resources.Models["my_model"] + assert.Equal(t, "my_model", m.Model.Name) +} + +func TestInterpolateUnknownResourceType(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my_job": { + JobSettings: &jobs.JobSettings{ + Tags: map[string]string{ + "other_unknown": "${resources.unknown.other_unknown.id}", + }, + }, + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, Interpolate()) + assert.Contains(t, err.Error(), `reference does not exist: ${resources.unknown.other_unknown.id}`) +} diff --git a/bundle/internal/bundletest/location.go b/bundle/internal/bundletest/location.go new file mode 100644 index 000000000..1fd6f968c --- /dev/null +++ b/bundle/internal/bundletest/location.go @@ -0,0 +1,34 @@ +package bundletest + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" +) + +// SetLocation sets the location of all values in the bundle to the given path. 
+// This is useful for testing where we need to associate configuration +// with the path it is loaded from. +func SetLocation(b *bundle.Bundle, prefix string, filePath string) { + start := dyn.MustPathFromString(prefix) + b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + // If the path has the given prefix, set the location. + if p.HasPrefix(start) { + return v.WithLocation(dyn.Location{ + File: filePath, + }), nil + } + + // The path is not nested under the given prefix. + // If the path is a prefix of the prefix, keep traversing and return the node verbatim. + if start.HasPrefix(p) { + return v, nil + } + + // Return verbatim, but skip traversal. + return v, dyn.ErrSkip + }) + }) + + b.Config.ConfigureConfigFilePath() +} diff --git a/bundle/mutator.go b/bundle/mutator.go index e559d2375..bd1615fd7 100644 --- a/bundle/mutator.go +++ b/bundle/mutator.go @@ -20,7 +20,21 @@ func Apply(ctx context.Context, b *Bundle, m Mutator) error { ctx = log.NewContext(ctx, log.GetLogger(ctx).With("mutator", m.Name())) log.Debugf(ctx, "Apply") - err := m.Apply(ctx, b) + + err := b.Config.MarkMutatorEntry(ctx) + if err != nil { + log.Errorf(ctx, "entry error: %s", err) + return err + } + + defer func() { + err := b.Config.MarkMutatorExit(ctx) + if err != nil { + log.Errorf(ctx, "exit error: %s", err) + } + }() + + err = m.Apply(ctx, b) if err != nil { log.Errorf(ctx, "Error: %s", err) return err @@ -28,3 +42,20 @@ func Apply(ctx context.Context, b *Bundle, m Mutator) error { return nil } + +type funcMutator struct { + fn func(context.Context, *Bundle) error +} + +func (m funcMutator) Name() string { + return "" +} + +func (m funcMutator) Apply(ctx context.Context, b *Bundle) error { + return m.fn(ctx, b) +} + +// ApplyFunc applies an inline-specified function mutator. 
+func ApplyFunc(ctx context.Context, b *Bundle, fn func(context.Context, *Bundle) error) error { + return Apply(ctx, b, funcMutator{fn}) +} diff --git a/bundle/phases/build.go b/bundle/phases/build.go index 760967fca..362d23be1 100644 --- a/bundle/phases/build.go +++ b/bundle/phases/build.go @@ -4,7 +4,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/interpolation" + "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/scripts" ) @@ -18,8 +18,8 @@ func Build() bundle.Mutator { artifacts.InferMissingProperties(), artifacts.BuildAll(), scripts.Execute(config.ScriptPostBuild), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath("artifacts"), + mutator.ResolveVariableReferences( + "artifacts", ), }, ) diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index e0558d937..2c401c6b2 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -3,9 +3,7 @@ package phases import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/interpolation" "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/bundle/deploy/metadata" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/permissions" @@ -20,6 +18,10 @@ func Initialize() bundle.Mutator { return newPhase( "initialize", []bundle.Mutator{ + mutator.RewriteSyncPaths(), + mutator.MergeJobClusters(), + mutator.MergeJobTasks(), + mutator.MergePipelineClusters(), mutator.InitializeWorkspaceClient(), mutator.PopulateCurrentUser(), mutator.DefineDefaultWorkspaceRoot(), @@ -27,10 +29,10 @@ func Initialize() bundle.Mutator { mutator.DefineDefaultWorkspacePaths(), mutator.SetVariables(), mutator.ResolveResourceReferences(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath("bundle"), - interpolation.IncludeLookupsInPath("workspace"), - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), + mutator.ResolveVariableReferences( + "bundle", + "workspace", + "variables", ), mutator.SetRunAs(), mutator.OverrideCompute(), diff --git a/bundle/tests/bundle/pipeline_glob_paths_test.go b/bundle/tests/bundle/pipeline_glob_paths_test.go index 539ffc9d3..8f2b62a6b 100644 --- a/bundle/tests/bundle/pipeline_glob_paths_test.go +++ b/bundle/tests/bundle/pipeline_glob_paths_test.go @@ -5,30 +5,34 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" - "github.com/databricks/cli/cmd/root" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { ctx := context.Background() - ctx = root.SetWorkspaceClient(ctx, nil) - b, err := bundle.Load(ctx, "./pipeline_glob_paths") require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget("default")...)) require.NoError(t, err) - b.Config.Bundle.Target = "default" - b.Config.Workspace.CurrentUser = &config.User{User: &iam.User{UserName: 
"user@domain.com"}} - b.WorkspaceClient() + // Configure mock workspace client + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &config.Config{ + Host: "https://mock.databricks.workspace.com", + } + m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "user@domain.com", + }, nil) + b.SetWorkpaceClient(m.WorkspaceClient) - m := phases.Initialize() - err = bundle.Apply(ctx, b, m) + err = bundle.Apply(ctx, b, phases.Initialize()) require.Error(t, err) require.ErrorContains(t, err, "notebook ./non-existent not found") diff --git a/bundle/tests/environment_overrides/resources/databricks.yml b/bundle/tests/environment_overrides/resources/databricks.yml index df261ba03..137f8d9df 100644 --- a/bundle/tests/environment_overrides/resources/databricks.yml +++ b/bundle/tests/environment_overrides/resources/databricks.yml @@ -28,8 +28,6 @@ environments: pipelines: boolean1: - # Note: setting a property to a zero value (in Go) does not have effect. - # See the corresponding test for details. photon: false boolean2: diff --git a/bundle/tests/environment_overrides_test.go b/bundle/tests/environment_overrides_test.go index 91dc2c811..4a1115048 100644 --- a/bundle/tests/environment_overrides_test.go +++ b/bundle/tests/environment_overrides_test.go @@ -29,10 +29,7 @@ func TestEnvironmentOverridesResourcesStaging(t *testing.T) { b := loadTarget(t, "./environment_overrides/resources", "staging") assert.Equal(t, "staging job", b.Config.Resources.Jobs["job1"].Name) - // Overrides are only applied if they are not zero-valued. - // This means that in its current form, we cannot override a true value with a false value. - // Note: this is not desirable and will be addressed by representing our configuration - // in a different structure (e.g. with cty), instead of Go structs. - assert.Equal(t, true, b.Config.Resources.Pipelines["boolean1"].Photon) + // Override values are applied in the staging environment. 
+ assert.Equal(t, false, b.Config.Resources.Pipelines["boolean1"].Photon) assert.Equal(t, true, b.Config.Resources.Pipelines["boolean2"].Photon) } diff --git a/bundle/tests/interpolation_test.go b/bundle/tests/interpolation_test.go index 837891a07..a9659d33f 100644 --- a/bundle/tests/interpolation_test.go +++ b/bundle/tests/interpolation_test.go @@ -5,16 +5,16 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/interpolation" + "github.com/databricks/cli/bundle/config/mutator" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestInterpolation(t *testing.T) { b := load(t, "./interpolation") - err := bundle.Apply(context.Background(), b, interpolation.Interpolate( - interpolation.IncludeLookupsInPath("bundle"), - interpolation.IncludeLookupsInPath("workspace"), + err := bundle.Apply(context.Background(), b, mutator.ResolveVariableReferences( + "bundle", + "workspace", )) require.NoError(t, err) assert.Equal(t, "foo bar", b.Config.Bundle.Name) @@ -23,9 +23,9 @@ func TestInterpolation(t *testing.T) { func TestInterpolationWithTarget(t *testing.T) { b := loadTarget(t, "./interpolation_target", "development") - err := bundle.Apply(context.Background(), b, interpolation.Interpolate( - interpolation.IncludeLookupsInPath("bundle"), - interpolation.IncludeLookupsInPath("workspace"), + err := bundle.Apply(context.Background(), b, mutator.ResolveVariableReferences( + "bundle", + "workspace", )) require.NoError(t, err) assert.Equal(t, "foo bar", b.Config.Bundle.Name) diff --git a/bundle/tests/job_with_spark_conf_test.go b/bundle/tests/job_with_spark_conf_test.go index a2c04c5ee..90bdc977d 100644 --- a/bundle/tests/job_with_spark_conf_test.go +++ b/bundle/tests/job_with_spark_conf_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestJobWithSparkConf(t *testing.T) { @@ -14,9 +15,17 @@ func TestJobWithSparkConf(t *testing.T) { assert.Len(t, job.JobClusters, 1) assert.Equal(t, "test_cluster", job.JobClusters[0].JobClusterKey) - // Existing behavior is such that including non-string values - // in the spark_conf map will cause the job to fail to load. - // This is expected to be solved once we switch to the custom YAML loader. - tasks := job.Tasks - assert.Len(t, tasks, 0, "see https://github.com/databricks/cli/issues/992") + // This test exists because of https://github.com/databricks/cli/issues/992. + // It is solved for bundles as of https://github.com/databricks/cli/pull/1098. 
+ require.Len(t, job.JobClusters, 1) + cluster := job.JobClusters[0] + assert.Equal(t, "14.2.x-scala2.12", cluster.NewCluster.SparkVersion) + assert.Equal(t, "i3.xlarge", cluster.NewCluster.NodeTypeId) + assert.Equal(t, 2, cluster.NewCluster.NumWorkers) + assert.Equal(t, map[string]string{ + "spark.string": "string", + "spark.int": "1", + "spark.bool": "true", + "spark.float": "1.2", + }, cluster.NewCluster.SparkConf) } diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index f23b10764..3a28d822a 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -19,8 +19,17 @@ func load(t *testing.T, path string) *bundle.Bundle { } func loadTarget(t *testing.T, path, env string) *bundle.Bundle { - b := load(t, path) - err := bundle.Apply(context.Background(), b, mutator.SelectTarget(env)) + ctx := context.Background() + b, err := bundle.Load(ctx, path) + require.NoError(t, err) + err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget(env)...)) + require.NoError(t, err) + err = bundle.Apply(ctx, b, bundle.Seq( + mutator.RewriteSyncPaths(), + mutator.MergeJobClusters(), + mutator.MergeJobTasks(), + mutator.MergePipelineClusters(), + )) require.NoError(t, err) return b } diff --git a/bundle/tests/override_sync_test.go b/bundle/tests/override_sync_test.go index a2d3a05f5..64f28e377 100644 --- a/bundle/tests/override_sync_test.go +++ b/bundle/tests/override_sync_test.go @@ -1,40 +1,38 @@ package config_tests import ( + "path/filepath" "testing" + "github.com/databricks/cli/bundle" "github.com/stretchr/testify/assert" ) func TestOverrideSyncTarget(t *testing.T) { - b := load(t, "./override_sync") - assert.ElementsMatch(t, []string{"src/*"}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + var b *bundle.Bundle b = loadTarget(t, "./override_sync", "development") - assert.ElementsMatch(t, []string{"src/*", "tests/*"}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{"dist"}, b.Config.Sync.Exclude) + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) b = loadTarget(t, "./override_sync", "staging") - assert.ElementsMatch(t, []string{"src/*", "fixtures/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) b = loadTarget(t, "./override_sync", "prod") - assert.ElementsMatch(t, []string{"src/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) } func TestOverrideSyncTargetNoRootSync(t *testing.T) { - b := load(t, "./override_sync_no_root") - assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + var b *bundle.Bundle b = loadTarget(t, "./override_sync_no_root", "development") - assert.ElementsMatch(t, []string{"tests/*"}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{"dist"}, b.Config.Sync.Exclude) + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) b = loadTarget(t, "./override_sync_no_root", "staging") - assert.ElementsMatch(t, []string{"fixtures/*"}, b.Config.Sync.Include) + assert.ElementsMatch(t, 
[]string{filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) b = loadTarget(t, "./override_sync_no_root", "prod") diff --git a/bundle/tests/relative_path_with_includes_test.go b/bundle/tests/relative_path_with_includes_test.go index 92249c412..1d1f321d4 100644 --- a/bundle/tests/relative_path_with_includes_test.go +++ b/bundle/tests/relative_path_with_includes_test.go @@ -11,7 +11,7 @@ import ( ) func TestRelativePathsWithIncludes(t *testing.T) { - b := load(t, "./relative_path_with_includes") + b := loadTarget(t, "./relative_path_with_includes", "default") m := mutator.TranslatePaths() err := bundle.Apply(context.Background(), b, m) @@ -20,8 +20,22 @@ func TestRelativePathsWithIncludes(t *testing.T) { assert.Equal(t, "artifact_a", b.Config.Artifacts["test_a"].Path) assert.Equal(t, filepath.Join("subfolder", "artifact_b"), b.Config.Artifacts["test_b"].Path) - assert.ElementsMatch(t, []string{"./folder_a/*.*", filepath.Join("subfolder", "folder_c", "*.*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{"./folder_b/*.*", filepath.Join("subfolder", "folder_d", "*.*")}, b.Config.Sync.Exclude) + assert.ElementsMatch( + t, + []string{ + filepath.Join("folder_a", "*.*"), + filepath.Join("subfolder", "folder_c", "*.*"), + }, + b.Config.Sync.Include, + ) + assert.ElementsMatch( + t, + []string{ + filepath.Join("folder_b", "*.*"), + filepath.Join("subfolder", "folder_d", "*.*"), + }, + b.Config.Sync.Exclude, + ) assert.Equal(t, filepath.Join("dist", "job_a.whl"), b.Config.Resources.Jobs["job_a"].Tasks[0].Libraries[0].Whl) assert.Equal(t, filepath.Join("subfolder", "dist", "job_b.whl"), b.Config.Resources.Jobs["job_b"].Tasks[0].Libraries[0].Whl) diff --git a/bundle/tests/run_as/databricks.yml b/bundle/tests/run_as/databricks.yml index 18ea55736..1cdc9e44b 100644 --- a/bundle/tests/run_as/databricks.yml +++ b/bundle/tests/run_as/databricks.yml @@ -13,30 +13,42 @@ targets: resources: pipelines: nyc_taxi_pipeline: + name: "nyc taxi loader" + permissions: - level: CAN_VIEW service_principal_name: my_service_principal - level: CAN_VIEW user_name: my_user_name - name: "nyc taxi loader" + libraries: - notebook: path: ./dlt/nyc_taxi_loader + jobs: job_one: name: Job One + tasks: - - task: + - task_key: "task_one" + notebook_task: notebook_path: "./test.py" + job_two: name: Job Two + tasks: - - task: + - task_key: "task_two" + notebook_task: notebook_path: "./test.py" + job_three: name: Job Three + run_as: service_principal_name: "my_service_principal_for_job" + tasks: - - task: + - task_key: "task_three" + notebook_task: notebook_path: "./test.py" diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 44c068165..98aaf6358 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -13,12 +13,17 @@ import ( func TestRunAsDefault(t *testing.T) { b := load(t, "./run_as") - b.Config.Workspace.CurrentUser = &config.User{ - User: &iam.User{ - UserName: "jane@doe.com", - }, - } + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + err := bundle.Apply(ctx, b, mutator.SetRunAs()) assert.NoError(t, err) @@ -39,21 +44,26 @@ func TestRunAsDefault(t *testing.T) { pipelines := b.Config.Resources.Pipelines assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) - assert.Equal(t, 
pipelines["nyc_taxi_pipeline"].Permissions[0].Level, "CAN_VIEW") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].UserName, "my_user_name") + assert.Equal(t, "CAN_VIEW", pipelines["nyc_taxi_pipeline"].Permissions[0].Level) + assert.Equal(t, "my_user_name", pipelines["nyc_taxi_pipeline"].Permissions[0].UserName) - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].Level, "IS_OWNER") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].ServicePrincipalName, "my_service_principal") + assert.Equal(t, "IS_OWNER", pipelines["nyc_taxi_pipeline"].Permissions[1].Level) + assert.Equal(t, "my_service_principal", pipelines["nyc_taxi_pipeline"].Permissions[1].ServicePrincipalName) } func TestRunAsDevelopment(t *testing.T) { b := loadTarget(t, "./run_as", "development") - b.Config.Workspace.CurrentUser = &config.User{ - User: &iam.User{ - UserName: "jane@doe.com", - }, - } + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + err := bundle.Apply(ctx, b, mutator.SetRunAs()) assert.NoError(t, err) @@ -74,9 +84,9 @@ func TestRunAsDevelopment(t *testing.T) { pipelines := b.Config.Resources.Pipelines assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].Level, "CAN_VIEW") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].ServicePrincipalName, "my_service_principal") + assert.Equal(t, "CAN_VIEW", pipelines["nyc_taxi_pipeline"].Permissions[0].Level) + assert.Equal(t, "my_service_principal", pipelines["nyc_taxi_pipeline"].Permissions[0].ServicePrincipalName) - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].Level, "IS_OWNER") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].UserName, "my_user_name") + assert.Equal(t, "IS_OWNER", pipelines["nyc_taxi_pipeline"].Permissions[1].Level) + assert.Equal(t, "my_user_name", pipelines["nyc_taxi_pipeline"].Permissions[1].UserName) } diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 91e165b15..05314a846 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -5,9 +5,7 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/interpolation" "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/config/variable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -17,9 +15,10 @@ func TestVariables(t *testing.T) { b := load(t, "./variables/vanilla") err := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) + mutator.ResolveVariableReferences( + "variables", + ), + )) require.NoError(t, err) assert.Equal(t, "abc def", b.Config.Bundle.Name) } @@ -28,9 +27,10 @@ func TestVariablesLoadingFailsWhenRequiredVariableIsNotSpecified(t *testing.T) { b := load(t, "./variables/vanilla") err := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) + mutator.ResolveVariableReferences( + "variables", + ), + )) assert.ErrorContains(t, err, "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") } @@ -39,9 +39,10 @@ func TestVariablesTargetsBlockOverride(t *testing.T) { err := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-single-variable-override"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) + mutator.ResolveVariableReferences( + "variables", + ), + )) require.NoError(t, err) assert.Equal(t, "default-a dev-b", b.Config.Workspace.Profile) } @@ -51,9 +52,10 @@ func TestVariablesTargetsBlockOverrideForMultipleVariables(t *testing.T) { err := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-two-variable-overrides"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) + mutator.ResolveVariableReferences( + "variables", + ), + )) require.NoError(t, err) assert.Equal(t, "prod-a prod-b", b.Config.Workspace.Profile) } @@ -64,9 +66,10 @@ func TestVariablesTargetsBlockOverrideWithProcessEnvVars(t *testing.T) { err := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-two-variable-overrides"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) + mutator.ResolveVariableReferences( + "variables", + ), + )) require.NoError(t, err) assert.Equal(t, "prod-a env-var-b", b.Config.Workspace.Profile) } @@ -76,9 +79,10 @@ func TestVariablesTargetsBlockOverrideWithMissingVariables(t *testing.T) { err := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-missing-a-required-variable-assignment"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) + mutator.ResolveVariableReferences( + "variables", + ), + )) assert.ErrorContains(t, err, "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") } @@ -87,9 +91,10 @@ func TestVariablesTargetsBlockOverrideWithUndefinedVariables(t *testing.T) { err := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-using-an-undefined-variable"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) + mutator.ResolveVariableReferences( + "variables", + ), + )) assert.ErrorContains(t, err, "variable c is not defined but is assigned a value") } @@ -110,9 +115,7 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) { err := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-overrides-lookup"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) + )) require.NoError(t, err) assert.Equal(t, "cluster: some-test-cluster", b.Config.Variables["d"].Lookup.String()) assert.Equal(t, "instance-pool: some-test-instance-pool", b.Config.Variables["e"].Lookup.String()) diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index c76789c17..c1f0cdf29 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -1,6 +1,8 @@ package bundle import ( + "context" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" @@ -24,17 +26,22 @@ func newDeployCommand() *cobra.Command { cmd.Flags().StringVarP(&computeID, "compute-id", "c", "", "Override compute in the deployment with the given compute ID.") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b := bundle.Get(ctx) - b.Config.Bundle.Force = force - b.Config.Bundle.Deployment.Lock.Force = forceLock - b.Config.Bundle.ComputeID = computeID + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) error { + b.Config.Bundle.Force = force + b.Config.Bundle.Deployment.Lock.Force = forceLock + b.Config.Bundle.ComputeID = computeID - if cmd.Flag("fail-on-active-runs").Changed { - b.Config.Bundle.Deployment.FailOnActiveRuns = failOnActiveRuns - } + if cmd.Flag("fail-on-active-runs").Changed { + b.Config.Bundle.Deployment.FailOnActiveRuns = failOnActiveRuns + } - return bundle.Apply(cmd.Context(), b, bundle.Seq( + return nil + }) + + return bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Build(), phases.Deploy(), diff --git a/cmd/bundle/deployment/bind.go b/cmd/bundle/deployment/bind.go index 541292807..1287eb044 100644 --- a/cmd/bundle/deployment/bind.go +++ b/cmd/bundle/deployment/bind.go @@ -1,6 +1,7 @@ package deployment import ( + "context" "fmt" "github.com/databricks/cli/bundle" @@ -25,15 +26,14 @@ func newBindCommand() *cobra.Command { cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) - r := b.Config.Resources - resource, err := r.FindResourceByConfigKey(args[0]) + ctx := cmd.Context() + b := bundle.Get(ctx) + resource, err := b.Config.Resources.FindResourceByConfigKey(args[0]) if err != nil { return err } w := b.WorkspaceClient() - ctx := cmd.Context() exists, err := resource.Exists(ctx, w, args[1]) if err != nil { return fmt.Errorf("failed to fetch the resource, err: %w", err) @@ -43,8 +43,12 @@ func newBindCommand() *cobra.Command { return fmt.Errorf("%s with an id '%s' is not found", 
resource.TerraformResourceName(), args[1]) } - b.Config.Bundle.Deployment.Lock.Force = forceLock - err = bundle.Apply(cmd.Context(), b, bundle.Seq( + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) error { + b.Config.Bundle.Deployment.Lock.Force = forceLock + return nil + }) + + err = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Bind(&terraform.BindOptions{ AutoApprove: autoApprove, diff --git a/cmd/bundle/deployment/unbind.go b/cmd/bundle/deployment/unbind.go index e7de8a3d4..9f0e4f7c7 100644 --- a/cmd/bundle/deployment/unbind.go +++ b/cmd/bundle/deployment/unbind.go @@ -1,6 +1,8 @@ package deployment import ( + "context" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" @@ -19,14 +21,18 @@ func newUnbindCommand() *cobra.Command { cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) - r := b.Config.Resources - resource, err := r.FindResourceByConfigKey(args[0]) + ctx := cmd.Context() + b := bundle.Get(ctx) + resource, err := b.Config.Resources.FindResourceByConfigKey(args[0]) if err != nil { return err } - b.Config.Bundle.Deployment.Lock.Force = forceLock + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) error { + b.Config.Bundle.Deployment.Lock.Force = forceLock + return nil + }) + return bundle.Apply(cmd.Context(), b, bundle.Seq( phases.Initialize(), phases.Unbind(resource.TerraformResourceName(), args[0]), diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index a0bfb1a4a..958681f06 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -1,6 +1,7 @@ package bundle import ( + "context" "fmt" "os" @@ -30,11 +31,15 @@ func newDestroyCommand() *cobra.Command { ctx := cmd.Context() b := bundle.Get(ctx) - // If `--force-lock` is specified, force acquisition of the deployment lock. - b.Config.Bundle.Deployment.Lock.Force = forceDestroy + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + // If `--force-lock` is specified, force acquisition of the deployment lock. 
+ b.Config.Bundle.Deployment.Lock.Force = forceDestroy - // If `--auto-approve`` is specified, we skip confirmation checks - b.AutoApprove = autoApprove + // If `--auto-approve`` is specified, we skip confirmation checks + b.AutoApprove = autoApprove + + return nil + }) // we require auto-approve for non tty terminals since interactive consent // is not possible diff --git a/cmd/bundle/utils/utils.go b/cmd/bundle/utils/utils.go index f68ab06b0..e900f47c3 100644 --- a/cmd/bundle/utils/utils.go +++ b/cmd/bundle/utils/utils.go @@ -1,6 +1,8 @@ package utils import ( + "context" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" @@ -20,5 +22,7 @@ func ConfigureBundleWithVariables(cmd *cobra.Command, args []string) error { // Initialize variables by assigning them values passed as command line flags b := bundle.Get(cmd.Context()) - return b.Config.InitializeVariables(variables) + return bundle.ApplyFunc(cmd.Context(), b, func(ctx context.Context, b *bundle.Bundle) error { + return b.Config.InitializeVariables(variables) + }) } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 01b8c18ac..f235e097b 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/libs/log" "github.com/spf13/cobra" ) @@ -25,6 +26,12 @@ func newValidateCommand() *cobra.Command { return err } + // Until we change up the output of this command to be a text representation, + // we'll just output all diagnostics as debug logs. + for _, diag := range b.Config.Diagnostics() { + log.Debugf(cmd.Context(), "[%s]: %s", diag.Location, diag.Summary) + } + buf, err := json.MarshalIndent(b.Config, "", " ") if err != nil { return err diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index 3f9d90db6..edfc1f431 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -64,7 +64,13 @@ func loadBundle(cmd *cobra.Command, args []string, load func(ctx context.Context profile := getProfile(cmd) if profile != "" { - b.Config.Workspace.Profile = profile + err = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + b.Config.Workspace.Profile = profile + return nil + }) + if err != nil { + return nil, err + } } err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) diff --git a/go.mod b/go.mod index 4aaecd1d0..9fd37e6e0 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,6 @@ require ( github.com/hashicorp/hc-install v0.6.3 // MPL 2.0 github.com/hashicorp/terraform-exec v0.20.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.21.0 // MPL 2.0 - github.com/imdario/mergo v0.3.15 // BSD-3-Clause github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT diff --git a/go.sum b/go.sum index 545ff9e35..3826f15da 100644 --- a/go.sum +++ b/go.sum @@ -106,8 +106,6 @@ github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8J github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= 
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 549b393d2..0f3769ece 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -33,15 +33,6 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) - artifact := &config.Artifact{ - Type: "whl", - Files: []config.ArtifactFile{ - { - Source: whlPath, - }, - }, - } - wsDir := internal.TemporaryWorkspaceDir(t, w) b := &bundle.Bundle{ @@ -54,7 +45,14 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { ArtifactPath: wsDir, }, Artifacts: config.Artifacts{ - "test": artifact, + "test": &config.Artifact{ + Type: "whl", + Files: []config.ArtifactFile{ + { + Source: whlPath, + }, + }, + }, }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -80,9 +78,14 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { require.NoError(t, err) // The remote path attribute on the artifact file should have been set. - require.Regexp(t, regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), artifact.Files[0].RemotePath) + require.Regexp(t, + regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + b.Config.Artifacts["test"].Files[0].RemotePath, + ) // The task library path should have been updated to the remote path. - lib := b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0] - require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), lib.Whl) + require.Regexp(t, + regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl, + ) } diff --git a/libs/dyn/merge/elements_by_key.go b/libs/dyn/merge/elements_by_key.go new file mode 100644 index 000000000..3ce571bf7 --- /dev/null +++ b/libs/dyn/merge/elements_by_key.go @@ -0,0 +1,67 @@ +package merge + +import "github.com/databricks/cli/libs/dyn" + +type elementsByKey struct { + key string + keyFunc func(dyn.Value) string +} + +func (e elementsByKey) Map(v dyn.Value) (dyn.Value, error) { + // We know the type of this value is a sequence. + // For additional defence, return self if it is not. + elements, ok := v.AsSequence() + if !ok { + return v, nil + } + + seen := make(map[string]dyn.Value, len(elements)) + keys := make([]string, 0, len(elements)) + + // Iterate in natural order. For a given key, we first see the + // base definition and merge instances that come after it. + for i := range elements { + kv := elements[i].Get(e.key) + key := e.keyFunc(kv) + + // Register element with key if not yet seen before. + ref, ok := seen[key] + if !ok { + keys = append(keys, key) + seen[key] = elements[i] + continue + } + + // Merge this instance into the reference. + nv, err := Merge(ref, elements[i]) + if err != nil { + return v, err + } + + // Overwrite reference. + seen[key] = nv + } + + // Gather resulting elements in natural order. 
+ out := make([]dyn.Value, 0, len(keys)) + for _, key := range keys { + nv, err := dyn.Set(seen[key], e.key, dyn.V(key)) + if err != nil { + return dyn.InvalidValue, err + } + out = append(out, nv) + } + + return dyn.NewValue(out, v.Location()), nil +} + +// ElementsByKey returns a [dyn.MapFunc] that operates on a sequence +// where each element is a map. It groups elements by a key and merges +// elements with the same key. +// +// The function that extracts the key from an element is provided as +// a parameter. The resulting elements get their key field overwritten +// with the value as returned by the key function. +func ElementsByKey(key string, keyFunc func(dyn.Value) string) dyn.MapFunc { + return elementsByKey{key, keyFunc}.Map +} diff --git a/libs/dyn/merge/elements_by_key_test.go b/libs/dyn/merge/elements_by_key_test.go new file mode 100644 index 000000000..c61f834e5 --- /dev/null +++ b/libs/dyn/merge/elements_by_key_test.go @@ -0,0 +1,52 @@ +package merge + +import ( + "strings" + "testing" + + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestElementByKey(t *testing.T) { + vin := dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "key": dyn.V("foo"), + "value": dyn.V(42), + }), + dyn.V(map[string]dyn.Value{ + "key": dyn.V("bar"), + "value": dyn.V(43), + }), + dyn.V(map[string]dyn.Value{ + // Use upper case key to test that the resulting element has its + // key field assigned to the output of the key function. + // The key function in this test returns the lower case version of the key. + "key": dyn.V("FOO"), + "value": dyn.V(44), + }), + }) + + keyFunc := func(v dyn.Value) string { + return strings.ToLower(v.MustString()) + } + + vout, err := dyn.MapByPath(vin, dyn.EmptyPath, ElementsByKey("key", keyFunc)) + require.NoError(t, err) + assert.Len(t, vout.MustSequence(), 2) + assert.Equal(t, + vout.Index(0).AsAny(), + map[string]any{ + "key": "foo", + "value": 44, + }, + ) + assert.Equal(t, + vout.Index(1).AsAny(), + map[string]any{ + "key": "bar", + "value": 43, + }, + ) +} diff --git a/libs/dyn/value.go b/libs/dyn/value.go index e9c22bfbe..ecf21abbe 100644 --- a/libs/dyn/value.go +++ b/libs/dyn/value.go @@ -42,6 +42,15 @@ func NewValue(v any, loc Location) Value { } } +// WithLocation returns a new Value with its location set to the given value. 
+func (v Value) WithLocation(loc Location) Value { + return Value{ + v: v.v, + k: v.k, + l: loc, + } +} + func (v Value) Kind() Kind { return v.k } diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 8d0c21010..e541259e0 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -66,7 +66,11 @@ func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target st require.NoError(t, err) // Apply initialize / validation mutators - b.Config.Workspace.CurrentUser = &bundleConfig.User{User: cachedUser} + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + b.Config.Workspace.CurrentUser = &bundleConfig.User{User: cachedUser} + return nil + }) + b.Tagging = tags.ForCloud(w.Config) b.WorkspaceClient() b.Config.Bundle.Terraform = &bundleConfig.Terraform{ From f70ec359dcee7da682702c86fb7e585ae32027fc Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 16 Feb 2024 21:54:38 +0100 Subject: [PATCH 041/286] Use `dyn.Value` as input to generating Terraform JSON (#1218) ## Changes This builds on #1098 and uses the `dyn.Value` representation of the bundle configuration to generate the Terraform JSON definition of resources in the bundle. The existing code (in `BundleToTerraform`) was not great and in an effort to slightly improve this, I added a package `tfdyn` that includes dedicated files for each resource type. Every resource type has its own conversion type that takes the `dyn.Value` of the bundle-side resource and converts it into Terraform resources (e.g. a job and optionally its permissions). Because we now use a `dyn.Value` as input, we can represent and emit zero-values that have so far been omitted. For example, setting `num_workers: 0` in your bundle configuration now propagates all the way to the Terraform JSON definition. ## Tests * Unit tests for every converter. I reused the test inputs from `convert_test.go`. * Equivalence tests in every existing test case checks that the resulting JSON is identical. * I manually compared the TF JSON file generated by the CLI from the main branch and from this PR on all of our bundles and bundle examples (internal and external) and found the output doesn't change (with the exception of the odd zero-value being included by the version in this PR). 
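
For reference, a minimal sketch of how the new entry point is exercised (mirroring the equivalence test added in `convert_test.go`): the typed bundle configuration is first converted to a `dyn.Value` with `convert.FromTyped`, and that value is then handed to `BundleToTerraformWithDynValue`. The wrapper function name below is illustrative only and assumes it lives next to `convert.go` in `bundle/deploy/terraform`, so the imports of that file apply.

```go
// Illustrative sketch only; mirrors bundleToTerraformEquivalenceTest in convert_test.go.
// exampleConvert is a hypothetical helper, not part of this change.
func exampleConvert(ctx context.Context, config *config.Root) (*schema.Root, error) {
	// Convert the statically typed bundle configuration into its dynamic representation.
	vin, err := convert.FromTyped(config, dyn.NilValue)
	if err != nil {
		return nil, err
	}
	// Generate the Terraform JSON root (providers and resources) from the dyn.Value.
	return BundleToTerraformWithDynValue(ctx, vin)
}
```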
--- bundle/deploy/terraform/convert.go | 61 +++++++++ bundle/deploy/terraform/convert_test.go | 64 +++++++++ bundle/deploy/terraform/tfdyn/convert.go | 23 ++++ .../terraform/tfdyn/convert_experiment.go | 45 ++++++ .../tfdyn/convert_experiment_test.go | 52 +++++++ .../deploy/terraform/tfdyn/convert_grants.go | 39 ++++++ .../terraform/tfdyn/convert_grants_test.go | 71 ++++++++++ bundle/deploy/terraform/tfdyn/convert_job.go | 87 ++++++++++++ .../terraform/tfdyn/convert_job_test.go | 129 ++++++++++++++++++ .../deploy/terraform/tfdyn/convert_model.go | 45 ++++++ .../tfdyn/convert_model_serving_endpoint.go | 45 ++++++ .../convert_model_serving_endpoint_test.go | 88 ++++++++++++ .../terraform/tfdyn/convert_model_test.go | 74 ++++++++++ .../terraform/tfdyn/convert_permissions.go | 32 +++++ .../tfdyn/convert_permissions_test.go | 85 ++++++++++++ .../terraform/tfdyn/convert_pipeline.go | 55 ++++++++ .../terraform/tfdyn/convert_pipeline_test.go | 128 +++++++++++++++++ .../tfdyn/convert_registered_model.go | 45 ++++++ .../tfdyn/convert_registered_model_test.go | 58 ++++++++ bundle/deploy/terraform/tfdyn/rename_keys.go | 46 +++++++ bundle/deploy/terraform/write.go | 12 +- libs/dyn/path.go | 8 ++ 22 files changed, 1291 insertions(+), 1 deletion(-) create mode 100644 bundle/deploy/terraform/tfdyn/convert.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_experiment.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_experiment_test.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_grants.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_grants_test.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_job.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_job_test.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_model.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_model_test.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_permissions.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_permissions_test.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_pipeline.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_pipeline_test.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_registered_model.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_registered_model_test.go create mode 100644 bundle/deploy/terraform/tfdyn/rename_keys.go diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 6723caee3..f2fb77e18 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -1,13 +1,16 @@ package terraform import ( + "context" "encoding/json" "fmt" "reflect" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/deploy/terraform/tfdyn" "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" tfjson "github.com/hashicorp/terraform-json" ) @@ -228,6 +231,64 @@ func BundleToTerraform(config *config.Root) *schema.Root { return tfroot } +// BundleToTerraformWithDynValue converts resources in a bundle configuration +// to the equivalent Terraform JSON representation. 
+func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema.Root, error) { + tfroot := schema.NewRoot() + tfroot.Provider = schema.NewProviders() + + // Convert each resource in the bundle to the equivalent Terraform representation. + resources, err := dyn.Get(root, "resources") + if err != nil { + // If the resources key is missing, return an empty root. + if dyn.IsNoSuchKeyError(err) { + return tfroot, nil + } + return nil, err + } + + tfroot.Resource = schema.NewResources() + + numResources := 0 + _, err = dyn.Walk(resources, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + if len(p) < 2 { + return v, nil + } + + typ := p[0].Key() + key := p[1].Key() + + // Lookup the converter based on the resource type. + c, ok := tfdyn.GetConverter(typ) + if !ok { + return dyn.InvalidValue, fmt.Errorf("no converter for resource type %s", typ) + } + + // Convert resource to Terraform representation. + err := c.Convert(ctx, key, v, tfroot.Resource) + if err != nil { + return dyn.InvalidValue, err + } + + numResources++ + + // Skip traversal of the resource itself. + return v, dyn.ErrSkip + }) + if err != nil { + return nil, err + } + + // We explicitly set "resource" to nil to omit it from a JSON encoding. + // This is required because the terraform CLI requires >= 1 resources defined + // if the "resource" property is used in a .tf.json file. + if numResources == 0 { + tfroot.Resource = nil + } + + return tfroot, nil +} + func TerraformToBundle(state *tfjson.State, config *config.Root) error { if state.Values != nil && state.Values.RootModule != nil { for _, resource := range state.Values.RootModule.Resources { diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index afef37088..fa59e092d 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -1,12 +1,16 @@ package terraform import ( + "context" + "encoding/json" "reflect" "testing" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -65,6 +69,8 @@ func TestBundleToTerraformJob(t *testing.T) { assert.Equal(t, "param1", resource.Parameter[0].Name) assert.Equal(t, "param2", resource.Parameter[1].Name) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformJobPermissions(t *testing.T) { @@ -92,6 +98,8 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { assert.Len(t, resource.AccessControl, 1) assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformJobTaskLibraries(t *testing.T) { @@ -128,6 +136,8 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { require.Len(t, resource.Task, 1) require.Len(t, resource.Task[0].Library, 1) assert.Equal(t, "mlflow", resource.Task[0].Library[0].Pypi.Package) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformPipeline(t *testing.T) { @@ -188,6 +198,8 @@ func TestBundleToTerraformPipeline(t *testing.T) { assert.Equal(t, resource.Notification[1].Alerts, []string{"on-update-failure", "on-flow-failure"}) assert.Equal(t, 
resource.Notification[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"}) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformPipelinePermissions(t *testing.T) { @@ -215,6 +227,8 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { assert.Len(t, resource.AccessControl, 1) assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModel(t *testing.T) { @@ -254,10 +268,15 @@ func TestBundleToTerraformModel(t *testing.T) { assert.Equal(t, "k2", resource.Tags[1].Key) assert.Equal(t, "v2", resource.Tags[1].Value) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelPermissions(t *testing.T) { var src = resources.MlflowModel{ + Model: &ml.Model{ + Name: "name", + }, Permissions: []resources.Permission{ { Level: "CAN_READ", @@ -281,6 +300,8 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { assert.Len(t, resource.AccessControl, 1) assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformExperiment(t *testing.T) { @@ -303,10 +324,15 @@ func TestBundleToTerraformExperiment(t *testing.T) { assert.Equal(t, "name", resource.Name) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformExperimentPermissions(t *testing.T) { var src = resources.MlflowExperiment{ + Experiment: &ml.Experiment{ + Name: "name", + }, Permissions: []resources.Permission{ { Level: "CAN_READ", @@ -331,6 +357,7 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel) + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelServing(t *testing.T) { @@ -377,10 +404,15 @@ func TestBundleToTerraformModelServing(t *testing.T) { assert.Equal(t, "model_name-1", resource.Config.TrafficConfig.Routes[0].ServedModelName) assert.Equal(t, 100, resource.Config.TrafficConfig.Routes[0].TrafficPercentage) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelServingPermissions(t *testing.T) { var src = resources.ModelServingEndpoint{ + CreateServingEndpoint: &serving.CreateServingEndpoint{ + Name: "name", + }, Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -405,6 +437,7 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformRegisteredModel(t *testing.T) { @@ -433,10 +466,17 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { assert.Equal(t, "schema", resource.SchemaName) assert.Equal(t, "comment", resource.Comment) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { var src = resources.RegisteredModel{ + CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ + Name: "name", + CatalogName: "catalog", + SchemaName: "schema", + }, Grants: []resources.Grant{ { Privileges: []string{"EXECUTE"}, @@ 
-460,6 +500,8 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { assert.Len(t, resource.Grant, 1) assert.Equal(t, "jane@doe.com", resource.Grant[0].Principal) assert.Equal(t, "EXECUTE", resource.Grant[0].Privileges[0]) + + bundleToTerraformEquivalenceTest(t, &config) } func TestTerraformToBundleEmptyLocalResources(t *testing.T) { @@ -827,3 +869,25 @@ func AssertFullResourceCoverage(t *testing.T, config *config.Root) { } } } + +func assertEqualTerraformRoot(t *testing.T, a, b *schema.Root) { + ba, err := json.Marshal(a) + require.NoError(t, err) + bb, err := json.Marshal(b) + require.NoError(t, err) + assert.JSONEq(t, string(ba), string(bb)) +} + +func bundleToTerraformEquivalenceTest(t *testing.T, config *config.Root) { + t.Run("dyn equivalence", func(t *testing.T) { + tf1 := BundleToTerraform(config) + + vin, err := convert.FromTyped(config, dyn.NilValue) + require.NoError(t, err) + tf2, err := BundleToTerraformWithDynValue(context.Background(), vin) + require.NoError(t, err) + + // Compare roots + assertEqualTerraformRoot(t, tf1, tf2) + }) +} diff --git a/bundle/deploy/terraform/tfdyn/convert.go b/bundle/deploy/terraform/tfdyn/convert.go new file mode 100644 index 000000000..9df4e2640 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert.go @@ -0,0 +1,23 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" +) + +type Converter interface { + Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error +} + +var converters = map[string]Converter{} + +func GetConverter(name string) (Converter, bool) { + c, ok := converters[name] + return c, ok +} + +func registerConverter(name string, c Converter) { + converters[name] = c +} diff --git a/bundle/deploy/terraform/tfdyn/convert_experiment.go b/bundle/deploy/terraform/tfdyn/convert_experiment.go new file mode 100644 index 000000000..0c129181f --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_experiment.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertExperimentResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourceMlflowExperiment{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "experiment normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type experimentConverter struct{} + +func (experimentConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertExperimentResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.MlflowExperiment[key] = vout.AsAny() + + // Configure permissions for this resource. 
+ if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.ExperimentId = fmt.Sprintf("${databricks_mlflow_experiment.%s.id}", key) + out.Permissions["mlflow_experiment_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("experiments", experimentConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_experiment_test.go b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go new file mode 100644 index 000000000..63add4368 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go @@ -0,0 +1,52 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertExperiment(t *testing.T) { + var src = resources.MlflowExperiment{ + Experiment: &ml.Experiment{ + Name: "name", + }, + Permissions: []resources.Permission{ + { + Level: "CAN_READ", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = experimentConverter{}.Convert(ctx, "my_experiment", vin, out) + require.NoError(t, err) + + // Assert equality on the experiment + assert.Equal(t, map[string]any{ + "name": "name", + }, out.MlflowExperiment["my_experiment"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + ExperimentId: "${databricks_mlflow_experiment.my_experiment.id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_READ", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["mlflow_experiment_my_experiment"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_grants.go b/bundle/deploy/terraform/tfdyn/convert_grants.go new file mode 100644 index 000000000..1ddd99dd7 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_grants.go @@ -0,0 +1,39 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" +) + +func convertGrantsResource(ctx context.Context, vin dyn.Value) *schema.ResourceGrants { + grants, ok := vin.Get("grants").AsSequence() + if !ok || len(grants) == 0 { + return nil + } + + resource := &schema.ResourceGrants{} + for _, permission := range grants { + principal, _ := permission.Get("principal").AsString() + v, _ := permission.Get("privileges").AsSequence() + + // Turn privileges into a slice of strings. 
+ var privileges []string + for _, privilege := range v { + str, ok := privilege.AsString() + if !ok { + continue + } + + privileges = append(privileges, str) + } + + resource.Grant = append(resource.Grant, schema.ResourceGrantsGrant{ + Principal: principal, + Privileges: privileges, + }) + } + + return resource +} diff --git a/bundle/deploy/terraform/tfdyn/convert_grants_test.go b/bundle/deploy/terraform/tfdyn/convert_grants_test.go new file mode 100644 index 000000000..a486bc36f --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_grants_test.go @@ -0,0 +1,71 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertGrants(t *testing.T) { + var src = resources.RegisteredModel{ + Grants: []resources.Grant{ + { + Privileges: []string{"EXECUTE", "FOO"}, + Principal: "jane@doe.com", + }, + { + Privileges: []string{"EXECUTE", "BAR"}, + Principal: "spn", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertGrantsResource(ctx, vin) + require.NotNil(t, resource) + assert.Equal(t, []schema.ResourceGrantsGrant{ + { + Privileges: []string{"EXECUTE", "FOO"}, + Principal: "jane@doe.com", + }, + { + Privileges: []string{"EXECUTE", "BAR"}, + Principal: "spn", + }, + }, resource.Grant) +} + +func TestConvertGrantsNil(t *testing.T) { + var src = resources.RegisteredModel{ + Grants: nil, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertGrantsResource(ctx, vin) + assert.Nil(t, resource) +} + +func TestConvertGrantsEmpty(t *testing.T) { + var src = resources.RegisteredModel{ + Grants: []resources.Grant{}, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertGrantsResource(ctx, vin) + assert.Nil(t, resource) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go new file mode 100644 index 000000000..b488df157 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -0,0 +1,87 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the input value to the underlying job schema. + // This removes superfluous keys and adapts the input to the expected schema. + vin, diags := convert.Normalize(jobs.JobSettings{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary) + } + + // Modify top-level keys. 
+ vout, err := renameKeys(vin, map[string]string{ + "tasks": "task", + "job_clusters": "job_cluster", + "parameters": "parameter", + }) + if err != nil { + return dyn.InvalidValue, err + } + + // Modify keys in the "git_source" block + vout, err = dyn.Map(vout, "git_source", func(v dyn.Value) (dyn.Value, error) { + return renameKeys(v, map[string]string{ + "git_branch": "branch", + "git_commit": "commit", + "git_provider": "provider", + "git_tag": "tag", + "git_url": "url", + }) + }) + if err != nil { + return dyn.InvalidValue, err + } + + // Modify keys in the "task" blocks + vout, err = dyn.Map(vout, "task", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + return renameKeys(v, map[string]string{ + "libraries": "library", + }) + })) + if err != nil { + return dyn.InvalidValue, err + } + + // Normalize the output value to the target schema. + vout, diags = convert.Normalize(schema.ResourceJob{}, vout) + for _, diag := range diags { + log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary) + } + + return vout, err +} + +type jobConverter struct{} + +func (jobConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertJobResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.Job[key] = vout.AsAny() + + // Configure permissions for this resource. + if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.JobId = fmt.Sprintf("${databricks_job.%s.id}", key) + out.Permissions["job_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("jobs", jobConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_job_test.go b/bundle/deploy/terraform/tfdyn/convert_job_test.go new file mode 100644 index 000000000..4e988b143 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_job_test.go @@ -0,0 +1,129 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertJob(t *testing.T) { + var src = resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "my job", + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "key", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "10.4.x-scala2.12", + }, + }, + }, + GitSource: &jobs.GitSource{ + GitProvider: jobs.GitProviderGitHub, + GitUrl: "https://github.com/foo/bar", + }, + Parameters: []jobs.JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + Tasks: []jobs.Task{ + { + TaskKey: "task_key", + JobClusterKey: "job_cluster_key", + Libraries: []compute.Library{ + { + Pypi: &compute.PythonPyPiLibrary{ + Package: "package", + }, + }, + { + Whl: "/path/to/my.whl", + }, + }, + }, + }, + }, + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = jobConverter{}.Convert(ctx, "my_job", vin, out) + require.NoError(t, err) + + // Assert equality on the job + assert.Equal(t, map[string]any{ + "name": "my 
job", + "job_cluster": []any{ + map[string]any{ + "job_cluster_key": "key", + "new_cluster": map[string]any{ + "spark_version": "10.4.x-scala2.12", + }, + }, + }, + "git_source": map[string]any{ + "provider": "gitHub", + "url": "https://github.com/foo/bar", + }, + "parameter": []any{ + map[string]any{ + "name": "param1", + "default": "default1", + }, + map[string]any{ + "name": "param2", + "default": "default2", + }, + }, + "task": []any{ + map[string]any{ + "task_key": "task_key", + "job_cluster_key": "job_cluster_key", + "library": []any{ + map[string]any{ + "pypi": map[string]any{ + "package": "package", + }, + }, + map[string]any{ + "whl": "/path/to/my.whl", + }, + }, + }, + }, + }, out.Job["my_job"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + JobId: "${databricks_job.my_job.id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["job_my_job"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_model.go b/bundle/deploy/terraform/tfdyn/convert_model.go new file mode 100644 index 000000000..f5d7d489b --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_model.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertModelResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourceMlflowModel{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "model normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type modelConverter struct{} + +func (modelConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertModelResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.MlflowModel[key] = vout.AsAny() + + // Configure permissions for this resource. + if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.RegisteredModelId = fmt.Sprintf("${databricks_mlflow_model.%s.registered_model_id}", key) + out.Permissions["mlflow_model_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("models", modelConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go new file mode 100644 index 000000000..b67e4dcc3 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertModelServingEndpointResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. 
+ vout, diags := convert.Normalize(schema.ResourceModelServing{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "model serving endpoint normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type modelServingEndpointConverter struct{} + +func (modelServingEndpointConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertModelServingEndpointResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.ModelServing[key] = vout.AsAny() + + // Configure permissions for this resource. + if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.ServingEndpointId = fmt.Sprintf("${databricks_model_serving.%s.serving_endpoint_id}", key) + out.Permissions["model_serving_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("model_serving_endpoints", modelServingEndpointConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go new file mode 100644 index 000000000..63b75e9ab --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go @@ -0,0 +1,88 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertModelServingEndpoint(t *testing.T) { + var src = resources.ModelServingEndpoint{ + CreateServingEndpoint: &serving.CreateServingEndpoint{ + Name: "name", + Config: serving.EndpointCoreConfigInput{ + ServedModels: []serving.ServedModelInput{ + { + ModelName: "model_name", + ModelVersion: "1", + ScaleToZeroEnabled: true, + WorkloadSize: "Small", + }, + }, + TrafficConfig: &serving.TrafficConfig{ + Routes: []serving.Route{ + { + ServedModelName: "model_name-1", + TrafficPercentage: 100, + }, + }, + }, + }, + }, + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = modelServingEndpointConverter{}.Convert(ctx, "my_model_serving_endpoint", vin, out) + require.NoError(t, err) + + // Assert equality on the model serving endpoint + assert.Equal(t, map[string]any{ + "name": "name", + "config": map[string]any{ + "served_models": []any{ + map[string]any{ + "model_name": "model_name", + "model_version": "1", + "scale_to_zero_enabled": true, + "workload_size": "Small", + }, + }, + "traffic_config": map[string]any{ + "routes": []any{ + map[string]any{ + "served_model_name": "model_name-1", + "traffic_percentage": int64(100), + }, + }, + }, + }, + }, out.ModelServing["my_model_serving_endpoint"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + ServingEndpointId: "${databricks_model_serving.my_model_serving_endpoint.serving_endpoint_id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["model_serving_my_model_serving_endpoint"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_model_test.go 
b/bundle/deploy/terraform/tfdyn/convert_model_test.go new file mode 100644 index 000000000..542caa878 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_model_test.go @@ -0,0 +1,74 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertModel(t *testing.T) { + var src = resources.MlflowModel{ + Model: &ml.Model{ + Name: "name", + Description: "description", + Tags: []ml.ModelTag{ + { + Key: "k1", + Value: "v1", + }, + { + Key: "k2", + Value: "v2", + }, + }, + }, + Permissions: []resources.Permission{ + { + Level: "CAN_READ", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = modelConverter{}.Convert(ctx, "my_model", vin, out) + require.NoError(t, err) + + // Assert equality on the model + assert.Equal(t, map[string]any{ + "name": "name", + "description": "description", + "tags": []any{ + map[string]any{ + "key": "k1", + "value": "v1", + }, + map[string]any{ + "key": "k2", + "value": "v2", + }, + }, + }, out.MlflowModel["my_model"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + RegisteredModelId: "${databricks_mlflow_model.my_model.registered_model_id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_READ", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["mlflow_model_my_model"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_permissions.go b/bundle/deploy/terraform/tfdyn/convert_permissions.go new file mode 100644 index 000000000..99e8d2973 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_permissions.go @@ -0,0 +1,32 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" +) + +func convertPermissionsResource(ctx context.Context, vin dyn.Value) *schema.ResourcePermissions { + permissions, ok := vin.Get("permissions").AsSequence() + if !ok || len(permissions) == 0 { + return nil + } + + resource := &schema.ResourcePermissions{} + for _, permission := range permissions { + level, _ := permission.Get("level").AsString() + userName, _ := permission.Get("user_name").AsString() + groupName, _ := permission.Get("group_name").AsString() + servicePrincipalName, _ := permission.Get("service_principal_name").AsString() + + resource.AccessControl = append(resource.AccessControl, schema.ResourcePermissionsAccessControl{ + PermissionLevel: level, + UserName: userName, + GroupName: groupName, + ServicePrincipalName: servicePrincipalName, + }) + } + + return resource +} diff --git a/bundle/deploy/terraform/tfdyn/convert_permissions_test.go b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go new file mode 100644 index 000000000..ba389020f --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go @@ -0,0 +1,85 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestConvertPermissions(t *testing.T) { + var src = resources.Job{ + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + UserName: "jane@doe.com", + }, + { + Level: "CAN_MANAGE", + GroupName: "special admins", + }, + { + Level: "CAN_RUN", + ServicePrincipalName: "spn", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertPermissionsResource(ctx, vin) + require.NotNil(t, resource) + assert.Equal(t, []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_VIEW", + UserName: "jane@doe.com", + GroupName: "", + ServicePrincipalName: "", + }, + { + PermissionLevel: "CAN_MANAGE", + UserName: "", + GroupName: "special admins", + ServicePrincipalName: "", + }, + { + PermissionLevel: "CAN_RUN", + UserName: "", + GroupName: "", + ServicePrincipalName: "spn", + }, + }, resource.AccessControl) +} + +func TestConvertPermissionsNil(t *testing.T) { + var src = resources.Job{ + Permissions: nil, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertPermissionsResource(ctx, vin) + assert.Nil(t, resource) +} + +func TestConvertPermissionsEmpty(t *testing.T) { + var src = resources.Job{ + Permissions: []resources.Permission{}, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertPermissionsResource(ctx, vin) + assert.Nil(t, resource) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_pipeline.go b/bundle/deploy/terraform/tfdyn/convert_pipeline.go new file mode 100644 index 000000000..ea0c94d66 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_pipeline.go @@ -0,0 +1,55 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertPipelineResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Modify top-level keys. + vout, err := renameKeys(vin, map[string]string{ + "libraries": "library", + "clusters": "cluster", + "notifications": "notification", + }) + if err != nil { + return dyn.InvalidValue, err + } + + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourcePipeline{}, vout) + for _, diag := range diags { + log.Debugf(ctx, "pipeline normalization diagnostic: %s", diag.Summary) + } + + return vout, err +} + +type pipelineConverter struct{} + +func (pipelineConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertPipelineResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.Pipeline[key] = vout.AsAny() + + // Configure permissions for this resource. 
+ if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.PipelineId = fmt.Sprintf("${databricks_pipeline.%s.id}", key) + out.Permissions["pipeline_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("pipelines", pipelineConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go new file mode 100644 index 000000000..7010d463a --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go @@ -0,0 +1,128 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertPipeline(t *testing.T) { + var src = resources.Pipeline{ + PipelineSpec: &pipelines.PipelineSpec{ + Name: "my pipeline", + Libraries: []pipelines.PipelineLibrary{ + { + Notebook: &pipelines.NotebookLibrary{ + Path: "notebook path", + }, + }, + { + File: &pipelines.FileLibrary{ + Path: "file path", + }, + }, + }, + Notifications: []pipelines.Notifications{ + { + Alerts: []string{ + "on-update-fatal-failure", + }, + EmailRecipients: []string{ + "jane@doe.com", + }, + }, + { + Alerts: []string{ + "on-update-failure", + "on-flow-failure", + }, + EmailRecipients: []string{ + "jane@doe.com", + "john@doe.com", + }, + }, + }, + Clusters: []pipelines.PipelineCluster{ + { + Label: "default", + NumWorkers: 1, + }, + }, + }, + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = pipelineConverter{}.Convert(ctx, "my_pipeline", vin, out) + require.NoError(t, err) + + // Assert equality on the pipeline + assert.Equal(t, map[string]any{ + "name": "my pipeline", + "library": []any{ + map[string]any{ + "notebook": map[string]any{ + "path": "notebook path", + }, + }, + map[string]any{ + "file": map[string]any{ + "path": "file path", + }, + }, + }, + "notification": []any{ + map[string]any{ + "alerts": []any{ + "on-update-fatal-failure", + }, + "email_recipients": []any{ + "jane@doe.com", + }, + }, + map[string]any{ + "alerts": []any{ + "on-update-failure", + "on-flow-failure", + }, + "email_recipients": []any{ + "jane@doe.com", + "john@doe.com", + }, + }, + }, + "cluster": []any{ + map[string]any{ + "label": "default", + "num_workers": int64(1), + }, + }, + }, out.Pipeline["my_pipeline"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + PipelineId: "${databricks_pipeline.my_pipeline.id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["pipeline_my_pipeline"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_registered_model.go b/bundle/deploy/terraform/tfdyn/convert_registered_model.go new file mode 100644 index 000000000..20aa596f2 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_registered_model.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + 
"github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertRegisteredModelResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourceRegisteredModel{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "registered model normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type registeredModelConverter struct{} + +func (registeredModelConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertRegisteredModelResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.RegisteredModel[key] = vout.AsAny() + + // Configure grants for this resource. + if grants := convertGrantsResource(ctx, vin); grants != nil { + grants.Function = fmt.Sprintf("${databricks_registered_model.%s.id}", key) + out.Grants["registered_model_"+key] = grants + } + + return nil +} + +func init() { + registerConverter("registered_models", registeredModelConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go new file mode 100644 index 000000000..77096e8d0 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go @@ -0,0 +1,58 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertRegisteredModel(t *testing.T) { + var src = resources.RegisteredModel{ + CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ + Name: "name", + CatalogName: "catalog", + SchemaName: "schema", + Comment: "comment", + }, + Grants: []resources.Grant{ + { + Privileges: []string{"EXECUTE"}, + Principal: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = registeredModelConverter{}.Convert(ctx, "my_registered_model", vin, out) + require.NoError(t, err) + + // Assert equality on the registered model + assert.Equal(t, map[string]any{ + "name": "name", + "catalog_name": "catalog", + "schema_name": "schema", + "comment": "comment", + }, out.RegisteredModel["my_registered_model"]) + + // Assert equality on the grants + assert.Equal(t, &schema.ResourceGrants{ + Function: "${databricks_registered_model.my_registered_model.id}", + Grant: []schema.ResourceGrantsGrant{ + { + Privileges: []string{"EXECUTE"}, + Principal: "jane@doe.com", + }, + }, + }, out.Grants["registered_model_my_registered_model"]) +} diff --git a/bundle/deploy/terraform/tfdyn/rename_keys.go b/bundle/deploy/terraform/tfdyn/rename_keys.go new file mode 100644 index 000000000..a65c9f257 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/rename_keys.go @@ -0,0 +1,46 @@ +package tfdyn + +import ( + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" +) + +// renameKeys renames keys in the given map value. +// +// Terraform resources sometimes use singular names for repeating blocks where the API +// definition uses the plural name. 
This function can convert between the two. +func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { + var err error + var acc = dyn.V(map[string]dyn.Value{}) + + nv, err := dyn.Walk(v, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + if len(p) == 0 { + return v, nil + } + + // Check if this key should be renamed. + for oldKey, newKey := range rename { + if p[0].Key() != oldKey { + continue + } + + // Add the new key to the accumulator. + p[0] = dyn.Key(newKey) + acc, err = dyn.SetByPath(acc, p, v) + if err != nil { + return dyn.NilValue, err + } + return dyn.InvalidValue, dyn.ErrDrop + } + + // Pass through all other values. + return v, dyn.ErrSkip + }) + + if err != nil { + return dyn.InvalidValue, err + } + + // Merge the accumulator with the original value. + return merge.Merge(nv, acc) +} diff --git a/bundle/deploy/terraform/write.go b/bundle/deploy/terraform/write.go index b53f9069d..3ec1b5812 100644 --- a/bundle/deploy/terraform/write.go +++ b/bundle/deploy/terraform/write.go @@ -7,6 +7,8 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" ) type write struct{} @@ -21,7 +23,15 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - root := BundleToTerraform(&b.Config) + var root *schema.Root + err = b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + root, err = BundleToTerraformWithDynValue(ctx, v) + return v, err + }) + if err != nil { + return err + } + f, err := os.Create(filepath.Join(dir, "bundle.tf.json")) if err != nil { return err diff --git a/libs/dyn/path.go b/libs/dyn/path.go index 34285de14..91893f921 100644 --- a/libs/dyn/path.go +++ b/libs/dyn/path.go @@ -10,6 +10,14 @@ type pathComponent struct { index int } +func (c pathComponent) Key() string { + return c.key +} + +func (c pathComponent) Index() int { + return c.index +} + func (c pathComponent) isKey() bool { return c.key != "" } From 1c680121c8d8bd29c8522f0997d03a2c89d37c31 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 19 Feb 2024 10:15:17 +0100 Subject: [PATCH 042/286] Add an experimental dbt-sql template (#1059) ## Changes This adds a new dbt-sql template. This work requires the new WorkspaceFS support for dbt tasks. In this latest revision, I've hidden the new template from the list so we can merge it, iterate over it, and propertly release the template at the right time. Blockers: - [x] WorkspaceFS support for dbt projects is in prod - [x] Move dbt files into a subdirectory - [ ] Wait until the next (>1.7.4) release of the dbt plugin which will have major improvements! 
- _Rather than wait, this template is hidden from the list of templates._ - [x] SQL extension is preconfigured based on extension settings (if possible) - MV / streaming tables: - [x] Add to template - [x] Fix https://github.com/databricks/dbt-databricks/issues/535 (to be released with in 1.7.4) - [x] Merge https://github.com/databricks/dbt-databricks/pull/338 (to be released with in 1.7.4) - [ ] Fix "too many 503 errors" issue (https://github.com/databricks/dbt-databricks/issues/570, internal tracker: ES-1009215, ES-1014138) - [x] Support ANSI mode in the template - [ ] Streaming tables support is either ungated or the template provides instructions about signup - _Mitigation for now: this template is hidden from the list of templates._ - [x] Support non-workspace-admin deployment - [x] Make sure `data_security_mode: SINGLE_USER` works on non-UC workspaces (it's required to be explicitly specified on UC workspaces with single-node clusters) - [x] Support non-UC workspaces ## Tests - [x] Unit tests - [x] Manual testing - [x] More manual testing - [ ] Reviewer manual testing - _I'd like to do a small bug bash post-merging._ - [x] Unit tests --- cmd/bundle/init.go | 11 +- libs/template/helpers.go | 21 +++ libs/template/renderer_test.go | 33 ++++- libs/template/templates/dbt-sql/README.md | 9 ++ .../dbt-sql/databricks_template_schema.json | 53 +++++++ .../templates/dbt-sql/library/versions.tmpl | 7 + .../dbt-sql/template/__preamble.tmpl | 9 ++ .../.vscode/__builtins__.pyi | 3 + .../{{.project_name}}/.vscode/extensions.json | 6 + .../.vscode/settings.json.tmpl | 33 +++++ .../template/{{.project_name}}/README.md.tmpl | 138 ++++++++++++++++++ .../{{.project_name}}/databricks.yml.tmpl | 32 ++++ .../dbt_profiles/profiles.yml.tmpl | 36 +++++ .../{{.project_name}}/dbt_project.yml.tmpl | 32 ++++ .../profile_template.yml.tmpl | 37 +++++ .../{{.project_name}}/requirements-dev.txt | 3 + .../resources/{{.project_name}}_job.yml.tmpl | 45 ++++++ .../{{.project_name}}/src/analyses/.gitkeep | 0 .../{{.project_name}}/src/macros/.gitkeep | 0 .../src/models/example/orders_daily.sql.tmpl | 24 +++ .../src/models/example/orders_raw.sql.tmpl | 16 ++ .../src/models/example/schema.yml | 21 +++ .../{{.project_name}}/src/seeds/.gitkeep | 0 .../{{.project_name}}/src/snapshots/.gitkeep | 0 .../{{.project_name}}/src/tests/.gitkeep | 0 .../template/{{.project_name}}/README.md.tmpl | 7 +- 26 files changed, 569 insertions(+), 7 deletions(-) create mode 100644 libs/template/templates/dbt-sql/README.md create mode 100644 libs/template/templates/dbt-sql/databricks_template_schema.json create mode 100644 libs/template/templates/dbt-sql/library/versions.tmpl create mode 100644 libs/template/templates/dbt-sql/template/__preamble.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/__builtins__.pyi create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/extensions.json create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_project.yml.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/profile_template.yml.tmpl 
create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/src/analyses/.gitkeep create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/src/macros/.gitkeep create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_daily.sql.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_raw.sql.tmpl create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/src/seeds/.gitkeep create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/src/snapshots/.gitkeep create mode 100644 libs/template/templates/dbt-sql/template/{{.project_name}}/src/tests/.gitkeep diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 47d78f7dc..306e29038 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -25,6 +25,7 @@ type nativeTemplate struct { gitUrl string description string aliases []string + hidden bool } const customTemplate = "custom..." @@ -34,6 +35,11 @@ var nativeTemplates = []nativeTemplate{ name: "default-python", description: "The default Python template for Notebooks / Delta Live Tables / Workflows", }, + { + name: "dbt-sql", + description: "The dbt SQL template (https://www.databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)", + hidden: true, + }, { name: "mlops-stacks", gitUrl: "https://github.com/databricks/mlops-stacks", @@ -50,7 +56,7 @@ var nativeTemplates = []nativeTemplate{ func nativeTemplateHelpDescriptions() string { var lines []string for _, template := range nativeTemplates { - if template.name != customTemplate { + if template.name != customTemplate && !template.hidden { lines = append(lines, fmt.Sprintf("- %s: %s", template.name, template.description)) } } @@ -61,6 +67,9 @@ func nativeTemplateHelpDescriptions() string { func nativeTemplateOptions() []cmdio.Tuple { names := make([]cmdio.Tuple, 0, len(nativeTemplates)) for _, template := range nativeTemplates { + if template.hidden { + continue + } tuple := cmdio.Tuple{ Name: template.name, Id: template.description, diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 537fadb1e..56710dfbd 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/auth" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/iam" ) @@ -29,6 +30,7 @@ type pair struct { var cachedUser *iam.User var cachedIsServicePrincipal *bool +var cachedCatalog *string func loadHelpers(ctx context.Context) template.FuncMap { w := root.WorkspaceClient(ctx) @@ -108,6 +110,25 @@ func loadHelpers(ctx context.Context) template.FuncMap { } return auth.GetShortUserName(cachedUser.UserName), nil }, + // Get the default workspace catalog. If there is no default, or if + // Unity Catalog is not enabled, return an empty string. 
+ "default_catalog": func() (string, error) { + if cachedCatalog == nil { + metastore, err := w.Metastores.Current(ctx) + if err != nil { + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.ErrorCode == "METASTORE_DOES_NOT_EXIST" { + // Workspace doesn't have a metastore assigned, ignore error + empty_default := "" + cachedCatalog = &empty_default + return "", nil + } + return "", err + } + cachedCatalog = &metastore.DefaultCatalogName + } + return *cachedCatalog, nil + }, "is_service_principal": func() (bool, error) { if cachedIsServicePrincipal != nil { return *cachedIsServicePrincipal, nil diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index e541259e0..964159ec2 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -37,10 +37,10 @@ func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { assert.Equal(t, perm, info.Mode().Perm()) } -func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { +func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { ctx := context.Background() - templatePath, err := prepareBuiltinTemplates("default-python", tempDir) + templatePath, err := prepareBuiltinTemplates(template, tempDir) require.NoError(t, err) libraryPath := filepath.Join(templatePath, "library") @@ -50,6 +50,9 @@ func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target st // Prepare helpers cachedUser = &iam.User{UserName: "user@domain.com"} + if isServicePrincipal { + cachedUser.UserName = "1d410060-a513-496f-a197-23cc82e5f46d" + } cachedIsServicePrincipal = &isServicePrincipal ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) @@ -102,11 +105,13 @@ func TestPrepareBuiltInTemplatesWithRelativePaths(t *testing.T) { assert.Equal(t, "./default-python", dir) } -func TestBuiltinTemplateValid(t *testing.T) { +func TestBuiltinPythonTemplateValid(t *testing.T) { // Test option combinations options := []string{"yes", "no"} isServicePrincipal := false build := false + catalog := "hive_metastore" + cachedCatalog = &catalog for _, includeNotebook := range options { for _, includeDlt := range options { for _, includePython := range options { @@ -118,7 +123,7 @@ func TestBuiltinTemplateValid(t *testing.T) { "include_python": includePython, } tempDir := t.TempDir() - assertBuiltinTemplateValid(t, config, "dev", isServicePrincipal, build, tempDir) + assertBuiltinTemplateValid(t, "default-python", config, "dev", isServicePrincipal, build, tempDir) } } } @@ -140,10 +145,28 @@ func TestBuiltinTemplateValid(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(tempDir) - assertBuiltinTemplateValid(t, config, "prod", isServicePrincipal, build, tempDir) + assertBuiltinTemplateValid(t, "default-python", config, "prod", isServicePrincipal, build, tempDir) defer os.RemoveAll(tempDir) } +func TestBuiltinDbtTemplateValid(t *testing.T) { + for _, personal_schemas := range []string{"yes", "no"} { + for _, target := range []string{"dev", "prod"} { + for _, isServicePrincipal := range []bool{true, false} { + config := map[string]any{ + "project_name": "my_project", + "http_path": "/sql/1.0/warehouses/123", + "default_catalog": "hive_metastore", + "personal_schemas": personal_schemas, + "shared_schema": "lennart", + } + build := false + assertBuiltinTemplateValid(t, "dbt-sql", config, target, isServicePrincipal, build, 
t.TempDir()) + } + } + } +} + func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { tmpDir := t.TempDir() diff --git a/libs/template/templates/dbt-sql/README.md b/libs/template/templates/dbt-sql/README.md new file mode 100644 index 000000000..4ccacab10 --- /dev/null +++ b/libs/template/templates/dbt-sql/README.md @@ -0,0 +1,9 @@ +# dbt template + +This folder provides a template for using dbt-core with Databricks Asset Bundles. +It leverages dbt-core for local development and relies on Databricks Asset Bundles +for deployment (either manually or with CI/CD). In production, +dbt is executed using Databricks Workflows. + +* Learn more about the dbt and its standard project structure here: https://docs.getdbt.com/docs/build/projects. +* Learn more about Databricks Asset Bundles here: https://docs.databricks.com/en/dev-tools/bundles/index.html diff --git a/libs/template/templates/dbt-sql/databricks_template_schema.json b/libs/template/templates/dbt-sql/databricks_template_schema.json new file mode 100644 index 000000000..736b12325 --- /dev/null +++ b/libs/template/templates/dbt-sql/databricks_template_schema.json @@ -0,0 +1,53 @@ +{ + "welcome_message": "\nWelcome to the (EXPERIMENTAL) dbt template for Databricks Asset Bundles!", + "properties": { + "project_name": { + "type": "string", + "pattern": "^[A-Za-z_][A-Za-z0-9_]+$", + "pattern_match_failure_message": "Name must consist of letters, numbers, and underscores.", + "default": "dbt_project", + "description": "\nPlease provide a unique name for this project.\nproject_name", + "order": 1 + }, + "http_path": { + "type": "string", + "pattern": "^/sql/.\\../warehouses/[a-z0-9]+$", + "pattern_match_failure_message": "Path must be of the form /sql/1.0/warehouses/", + "description": " \nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", + "order": 2 + }, + "default_catalog": { + "type": "string", + "default": "{{default_catalog}}", + "pattern": "^\\w*$", + "pattern_match_failure_message": "Invalid catalog name.", + "description": "\nPlease provide an initial catalog{{if eq (default_catalog) \"\"}} (leave blank when not using Unity Catalog){{end}}.\ndefault_catalog", + "order": 3 + }, + "personal_schemas": { + "type": "string", + "description": "\nWould you like to use a personal schema for each user working on this project? 
(e.g., 'catalog.{{short_name}}')\npersonal_schemas", + "enum": [ + "yes, use a schema based on the current user name during development", + "no, use a shared schema during development" + ], + "order": 4 + }, + "shared_schema": { + "skip_prompt_if": { + "properties": { + "personal_schemas": { + "const": "yes, use a schema based on the current user name during development" + } + } + }, + "type": "string", + "default": "default", + "pattern": "^\\w+$", + "pattern_match_failure_message": "Invalid schema name.", + "description": "\nPlease provide an initial schema during development.\ndefault_schema", + "order": 5 + } + }, + "success_message": "\n📊 Your new project has been created in the '{{.project_name}}' directory!\nIf you already have dbt installed, just type 'cd {{.project_name}}; dbt init' to get started.\nRefer to the README.md file for full \"getting started\" guide and production setup instructions.\n" +} diff --git a/libs/template/templates/dbt-sql/library/versions.tmpl b/libs/template/templates/dbt-sql/library/versions.tmpl new file mode 100644 index 000000000..f9a879d25 --- /dev/null +++ b/libs/template/templates/dbt-sql/library/versions.tmpl @@ -0,0 +1,7 @@ +{{define "latest_lts_dbr_version" -}} + 13.3.x-scala2.12 +{{- end}} + +{{define "latest_lts_db_connect_version_spec" -}} + >=13.3,<13.4 +{{- end}} diff --git a/libs/template/templates/dbt-sql/template/__preamble.tmpl b/libs/template/templates/dbt-sql/template/__preamble.tmpl new file mode 100644 index 000000000..b770b5ef9 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/__preamble.tmpl @@ -0,0 +1,9 @@ +# Preamble + +This file only template directives; it is skipped for the actual output. + +{{skip "__preamble"}} + +{{if eq .project_name "dbt"}} +{{fail "Project name 'dbt' is not supported"}} +{{end}} \ No newline at end of file diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/__builtins__.pyi b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/__builtins__.pyi new file mode 100644 index 000000000..0edd5181b --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/__builtins__.pyi @@ -0,0 +1,3 @@ +# Typings for Pylance in Visual Studio Code +# see https://github.com/microsoft/pyright/blob/main/docs/builtins.md +from databricks.sdk.runtime import * diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/extensions.json b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/extensions.json new file mode 100644 index 000000000..28fe943fd --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/extensions.json @@ -0,0 +1,6 @@ +{ + "recommendations": [ + "redhat.vscode-yaml", + "innoverio.vscode-dbt-power-user", + ] +} diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl new file mode 100644 index 000000000..562ba136f --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -0,0 +1,33 @@ +{ + "python.analysis.stubPath": ".vscode", + "databricks.python.envFile": "${workspaceFolder}/.env", + "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", + "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", + "python.testing.pytestArgs": [ + "." 
+    ],
+    "python.testing.unittestEnabled": false,
+    "python.testing.pytestEnabled": true,
+    "python.analysis.extraPaths": ["src"],
+    "files.exclude": {
+        "**/*.egg-info": true,
+        "**/__pycache__": true,
+        ".pytest_cache": true,
+    },
+    "python.envFile": "${workspaceFolder}/.databricks/.databricks.env",
+    "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
+    "sqltools.connections": [
+        {
+            "connectionMethod": "VS Code Extension (beta)",
+            "catalog": "hive_metastore",
+            "previewLimit": 50,
+            "driver": "Databricks",
+            "name": "databricks",
+            "path": "{{.http_path}}"
+        }
+    ],
+    "sqltools.autoConnectTo": "",
+    "[jinja-sql]": {
+        "editor.defaultFormatter": "innoverio.vscode-dbt-power-user"
+    }
+}
diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl
new file mode 100644
index 000000000..d46b61f72
--- /dev/null
+++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl
@@ -0,0 +1,138 @@
+# {{.project_name}}
+
+The '{{.project_name}}' project was generated by using the dbt template for
+Databricks Asset Bundles. It follows the standard dbt project structure
+and has an additional `resources` directory to define Databricks resources such as jobs
+that run dbt models.
+
+* Learn more about dbt and its standard project structure here: https://docs.getdbt.com/docs/build/projects.
+* Learn more about Databricks Asset Bundles here: https://docs.databricks.com/en/dev-tools/bundles/index.html
+
+The remainder of this file includes instructions for local development (using dbt)
+and deployment to production (using Databricks Asset Bundles).
+
+## Development setup
+
+1. Install the Databricks CLI from https://docs.databricks.com/dev-tools/cli/databricks-cli.html
+
+2. Authenticate to your Databricks workspace, if you have not done so already:
+   ```
+   $ databricks configure
+   ```
+
+3. Install dbt
+
+   To install dbt, you need a recent version of Python. For the instructions below,
+   we assume `python3` refers to the Python version you want to use. On some systems,
+   you may need to refer to a different Python version, e.g. `python` or `/usr/bin/python`.
+
+   Run these instructions from the `{{.project_name}}` directory. We recommend making
+   use of a Python virtual environment and installing dbt as follows:
+
+   ```
+   $ python3 -m venv .venv
+   $ . .venv/bin/activate
+   $ pip install -r requirements-dev.txt
+   ```
+
+4. Initialize your dbt profile
+
+   Use `dbt init` to initialize your profile.
+
+   ```
+   $ dbt init
+   ```
+
+   Note that dbt authentication uses personal access tokens by default
+   (see https://docs.databricks.com/dev-tools/auth/pat.html).
+   You can use OAuth as an alternative, but this currently requires manual configuration.
+   See https://github.com/databricks/dbt-databricks/blob/main/docs/oauth.md
+   for general instructions, or https://community.databricks.com/t5/technical-blog/using-dbt-core-with-oauth-on-azure-databricks/ba-p/46605
+   for advice on setting up OAuth for Azure Databricks.
+
+   To set up additional profiles, such as a 'prod' profile,
+   see https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles.
+
+5. Activate dbt so it can be used from the terminal
+
+   ```
+   $ . .venv/bin/activate
+   ```
+
+## Local development with dbt
+
+Use `dbt` to [run this project locally using a SQL warehouse](https://docs.databricks.com/partners/prep/dbt.html):
+
+```
+$ dbt seed
+$ dbt run
+```
+
+(Did you get an error that the dbt command could not be found? You may need
+to try the last step from the development setup above to re-activate
+your Python virtual environment!)
+
+
+To just evaluate a single model defined in a file called orders.sql, use:
+
+```
+$ dbt run --model orders
+```
+
+Use `dbt test` to run tests generated from yml files such as `models/schema.yml`
+and any SQL tests from `tests/`
+
+```
+$ dbt test
+```
+
+## Production setup
+
+Your production dbt profiles are defined in dbt_profiles/profiles.yml.
+These profiles define the default catalog, schema, and any other
+target-specific settings. Read more about dbt profiles on Databricks at
+https://docs.databricks.com/en/workflows/jobs/how-to/use-dbt-in-workflows.html#advanced-run-dbt-with-a-custom-profile.
+
+The target workspaces for staging and prod are defined in databricks.yml.
+You can manually deploy based on these configurations (see below).
+Or you can use CI/CD to automate deployment. See
+https://docs.databricks.com/dev-tools/bundles/ci-cd.html for documentation
+on CI/CD setup.
+
+## Manually deploying to Databricks with Databricks Asset Bundles
+
+Databricks Asset Bundles can be used to deploy to Databricks and to execute
+dbt commands as a job using Databricks Workflows. See
+https://docs.databricks.com/dev-tools/bundles/index.html to learn more.
+
+Use the Databricks CLI to deploy a development copy of this project to a workspace:
+
+```
+$ databricks bundle deploy --target dev
+```
+
+(Note that "dev" is the default target, so the `--target` parameter
+is optional here.)
+
+This deploys everything that's defined for this project.
+For example, the default template would deploy a job called
+`[dev yourname] {{.project_name}}_job` to your workspace.
+You can find that job by opening your workspace and clicking on **Workflows**.
+
+You can also deploy to your production target directly from the command-line.
+The warehouse, catalog, and schema for that target are configured in databricks.yml.
+When deploying to this target, note that the default job at resources/{{.project_name}}_job.yml
+has a schedule set that runs every day. The schedule is paused when deploying in development mode
+(see https://docs.databricks.com/dev-tools/bundles/deployment-modes.html).
+
+To deploy a production copy, type:
+
+```
+$ databricks bundle deploy --target prod
+```
+
+## IDE support
+
+Optionally, install developer tools such as the Databricks extension for Visual Studio Code from
+https://docs.databricks.com/dev-tools/vscode-ext.html. Third-party extensions
+related to dbt may further enhance your dbt development experience!
diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl
new file mode 100644
index 000000000..fdda03c0d
--- /dev/null
+++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl
@@ -0,0 +1,32 @@
+# This file defines the structure of this project and how it is deployed
+# to production using Databricks Asset Bundles.
+# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation.
+bundle:
+  name: {{.project_name}}
+
+include:
+  - resources/*.yml
+
+# Deployment targets.
+# The default schema, catalog, etc.
for dbt are defined in dbt_profiles/profiles.yml +targets: + dev: + default: true + # We use 'mode: development' to indicate this is a personal development copy. + # Any job schedules and triggers are paused by default. + mode: development + workspace: + host: {{workspace_host}} + + prod: + mode: production + workspace: + host: {{workspace_host}} + # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy. + root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target} + {{- if not is_service_principal}} + run_as: + # This runs as {{user_name}} in production. We could also use a service principal here + # using service_principal_name (see the Databricks documentation). + user_name: {{user_name}} + {{- end}} diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl new file mode 100644 index 000000000..d29bd55ce --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl @@ -0,0 +1,36 @@ +{{- $catalog := .default_catalog}} +{{- if eq .default_catalog ""}} +{{- $catalog = "\"\" # workspace default"}} +{{- end}} +# This file defines dbt profiles for deployed dbt jobs. +# Note that for local development you should create your own, local profile. +# (see README.md). +my_dbt_project: + target: dev # default target + outputs: + + dev: + type: databricks + method: http + catalog: {{$catalog}} + schema: "{{"{{"}} var('dev_schema') {{"}}"}}" + + http_path: {{.http_path}} + + # The workspace host / token are provided by Databricks + # see databricks.yml for the host used for 'dev' + host: "{{"{{"}} env_var('DBT_HOST') {{"}}"}}" + token: "{{"{{"}} env_var('DBT_ACCESS_TOKEN') {{"}}"}}" + + prod: + type: databricks + method: http + catalog: {{$catalog}} + schema: {{.shared_schema}} + + http_path: {{.http_path}} + + # The workspace host / token are provided by Databricks + # see databricks.yml for the host used for 'dev' + host: "{{"{{"}} env_var('DBT_HOST') {{"}}"}}" + token: "{{"{{"}} env_var('DBT_ACCESS_TOKEN') {{"}}"}}" diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_project.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_project.yml.tmpl new file mode 100644 index 000000000..11fbf051e --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_project.yml.tmpl @@ -0,0 +1,32 @@ +name: '{{.project_name}}' +version: '1.0.0' +config-version: 2 + +# This setting configures which "profile" dbt uses for this project. +profile: '{{.project_name}}' + +# These configurations specify where dbt should look for different types of files. +# For Databricks asset bundles, we put everything in src, as you may have +# non-dbt resources in your project. +model-paths: ["src/models"] +analysis-paths: ["src/analyses"] +test-paths: ["src/tests"] +seed-paths: ["src/seeds"] +macro-paths: ["src/macros"] +snapshot-paths: ["src/snapshots"] + +clean-targets: # directories to be removed by `dbt clean` + - "target" + - "dbt_packages" + +# Configuring models +# Full documentation: https://docs.getdbt.com/docs/configuring-models + +# In this example config, we tell dbt to build all models in the example/ +# directory as views by default. These settings can be overridden in the +# individual model files using the `{{"{{"}} config(...) {{"}}"}}` macro. 
+models: + {{.project_name}}: + # Config indicated by + and applies to all files under models/example/ + example: + +materialized: view diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/profile_template.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/profile_template.yml.tmpl new file mode 100644 index 000000000..1bab573f2 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/profile_template.yml.tmpl @@ -0,0 +1,37 @@ +# This file defines prompts with defaults for dbt initializaton. +# It is used when the `dbt init` command is invoked. +# +fixed: + type: databricks +prompts: + host: + default: {{(regexp "^https?://").ReplaceAllString workspace_host ""}} + token: + hint: 'personal access token to use, dapiXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' + hide_input: true + http_path: + hint: 'HTTP path of SQL warehouse to use' + default: {{.http_path}} + {{- if eq .default_catalog ""}} + _choose_unity_catalog: + 'use the default workspace catalog (or do not use Unity Catalog)': + _fixed_catalog: null + 'specify a default catalog': + catalog: + hint: 'initial catalog' + {{- else}} + catalog: + hint: 'initial catalog' + default: {{.default_catalog}} + {{- end}} + schema: + {{- if (regexp "^yes").MatchString .personal_schemas}} + hint: 'personal schema where dbt will build objects during development, example: {{short_name}}' + {{- else}} + hint: 'default schema where dbt will build objects' + default: {{.shared_schema}} + {{- end}} + threads: + hint: 'threads to use during development, 1 or more' + type: 'int' + default: 4 diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt b/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt new file mode 100644 index 000000000..10d7b9f10 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt @@ -0,0 +1,3 @@ +## requirements-dev.txt: dependencies for local development. + +dbt-databricks>=1.0.0,<2.0.0 diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl new file mode 100644 index 000000000..688c23b92 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -0,0 +1,45 @@ +resources: + jobs: + {{.project_name}}_job: + name: {{.project_name}}_job + + schedule: + # Run every day at 9:27 AM + quartz_cron_expression: 21 27 9 * * ? + timezone_id: UTC + + email_notifications: + on_failure: + - {{user_name}} + +{{- $dev_schema := .shared_schema }} +{{- if (regexp "^yes").MatchString .personal_schemas}} +{{- $dev_schema = "${workspace.current_user.short_name}"}} +{{- end}} + + tasks: + - task_key: dbt + + dbt_task: + project_directory: ../ + # The default schema, catalog, etc. 
are defined in ../dbt_profiles/profiles.yml + profiles_directory: dbt_profiles/ + commands: + - 'dbt deps --target=${bundle.target}' + - 'dbt seed --target=${bundle.target} --vars "{ dev_schema: {{$dev_schema}} }"' + - 'dbt run --target=${bundle.target} --vars "{ dev_schema: {{$dev_schema}} }"' + + libraries: + - pypi: + package: dbt-databricks>=1.0.0,<2.0.0 + + new_cluster: + spark_version: {{template "latest_lts_dbr_version"}} + node_type_id: {{smallest_node_type}} + data_security_mode: SINGLE_USER + num_workers: 0 + spark_conf: + spark.master: "local[*, 4]" + spark.databricks.cluster.profile: singleNode + custom_tags: + ResourceClass: SingleNode diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/analyses/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/analyses/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/macros/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/macros/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_daily.sql.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_daily.sql.tmpl new file mode 100644 index 000000000..a8b4c2f9a --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_daily.sql.tmpl @@ -0,0 +1,24 @@ +{{- if eq (default_catalog) ""}} +{{- /* This workspace might not have Unity Catalog, */}} +{{- /* so let's not show both materialized views and streaming tables. */}} +{{- /* They're not supported without Unity Catalog! */}} +-- This model file defines a table called 'orders_daily' +{{"{{"}} config(materialized = 'table') {{"}}"}} +{{- else}} +-- This model file defines a materialized view called 'orders_daily' +-- +-- Read more about materialized at https://docs.getdbt.com/reference/resource-configs/databricks-configs#materialized-views-and-streaming-tables +-- Current limitation: a "full refresh" is needed in case the definition below is changed; see https://github.com/databricks/dbt-databricks/issues/561. +{{"{{"}} config(materialized = 'materialized_view') {{"}}"}} +{{- end}} + +select order_date, count(*) AS number_of_orders + +from {{"{{"}} ref('orders_raw') {{"}}"}} + +-- During development, only process a smaller range of data +{% if target.name != 'prod' %} +where order_date >= '2019-08-01' and order_date < '2019-09-01' +{% endif %} + +group by order_date diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_raw.sql.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_raw.sql.tmpl new file mode 100644 index 000000000..17e6a5bf3 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_raw.sql.tmpl @@ -0,0 +1,16 @@ +-- This model file defines a streaming table called 'orders_raw' +-- +-- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/ +-- Read more about streaming tables at https://docs.getdbt.com/reference/resource-configs/databricks-configs#materialized-views-and-streaming-tables +-- Current limitation: a "full refresh" is needed in case the definition below is changed; see https://github.com/databricks/dbt-databricks/issues/561. 
+{{"{{"}} config(materialized = 'streaming_table') {{"}}"}} + +select + customer_name, + date(timestamp(from_unixtime(try_cast(order_datetime as bigint)))) as order_date, + order_number +from stream read_files( + "/databricks-datasets/retail-org/sales_orders/", + format => "json", + header => true +) diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml new file mode 100644 index 000000000..d34b9e645 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml @@ -0,0 +1,21 @@ + +version: 2 + +models: + - name: orders_raw + description: "Raw ingested orders" + columns: + - name: customer_name + description: "The name of a customer" + tests: + - unique + - not_null + + - name: orders_daily + description: "Number of orders by day" + columns: + - name: order_date + description: "The date on which orders took place" + tests: + - unique + - not_null diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/seeds/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/seeds/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/snapshots/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/snapshots/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/tests/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/tests/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl index 476c1cd6c..5adade0b3 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl @@ -6,7 +6,7 @@ The '{{.project_name}}' project was generated by using the default-python templa 1. Install the Databricks CLI from https://docs.databricks.com/dev-tools/cli/databricks-cli.html -2. Authenticate to your Databricks workspace: +2. Authenticate to your Databricks workspace, if you have not done so already: ``` $ databricks configure ``` @@ -28,6 +28,11 @@ The '{{.project_name}}' project was generated by using the default-python templa $ databricks bundle deploy --target prod ``` + Note that the default job from the template has a schedule that runs every day + (defined in resources/{{.project_name}}_job.yml). The schedule + is paused when deploying in development mode (see + https://docs.databricks.com/dev-tools/bundles/deployment-modes.html). + 5. To run a job or pipeline, use the "run" command: ``` $ databricks bundle run From a2a4948047e7e119ead809f59c80299900aeda32 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 19 Feb 2024 11:44:51 +0100 Subject: [PATCH 043/286] Allow use of variables references in primitive non-string fields (#1219) ## Changes This change enables the use of bundle variables for boolean, integer, and floating point fields. ## Tests * Unit tests. * I ran a manual test to confirm parameterizing the number of workers in a cluster definition works. 
--- .../mutator/resolve_variable_references.go | 16 ++- .../resolve_variable_references_test.go | 97 +++++++++++++++++++ libs/dyn/convert/from_typed.go | 16 +++ libs/dyn/convert/from_typed_test.go | 24 +++++ libs/dyn/convert/normalize.go | 16 +++ libs/dyn/convert/normalize_test.go | 24 +++++ libs/dyn/convert/to_typed.go | 16 +++ libs/dyn/convert/to_typed_test.go | 30 +++++- libs/dyn/dynvar/ref.go | 4 + libs/dyn/dynvar/ref_test.go | 7 ++ 10 files changed, 248 insertions(+), 2 deletions(-) diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index a9ff70f68..1075e83e3 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/cli/libs/dyn/dynvar" + "github.com/databricks/cli/libs/log" ) type resolveVariableReferences struct { @@ -58,7 +59,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) } // Resolve variable references in all values. - return dynvar.Resolve(root, func(path dyn.Path) (dyn.Value, error) { + root, err := dynvar.Resolve(root, func(path dyn.Path) (dyn.Value, error) { // Rewrite the shorthand path ${var.foo} into ${variables.foo.value}. if path.HasPrefix(varPath) && len(path) == 2 { path = dyn.NewPath( @@ -77,5 +78,18 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) return dyn.InvalidValue, dynvar.ErrSkipResolution }) + if err != nil { + return dyn.InvalidValue, err + } + + // Normalize the result because variable resolution may have been applied to non-string fields. + // For example, a variable reference may have been resolved to a integer. + root, diags := convert.Normalize(b.Config, root) + for _, diag := range diags { + // This occurs when a variable's resolved value is incompatible with the field's type. + // Log a warning until we have a better way to surface these diagnostics to the user. + log.Warnf(ctx, "normalization diagnostic: %s", diag.Summary) + } + return root, nil }) } diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go index 1f253d41c..8190c360f 100644 --- a/bundle/config/mutator/resolve_variable_references_test.go +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -8,7 +8,10 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -95,3 +98,97 @@ func TestResolveVariableReferencesToEmptyFields(t *testing.T) { // The job settings should have been interpolated to an empty string. 
require.Equal(t, "", b.Config.Resources.Jobs["job1"].JobSettings.Tags["git_branch"]) } + +func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { + var err error + + b := &bundle.Bundle{ + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "no_alert_for_canceled_runs": {}, + "no_alert_for_skipped_runs": {}, + "min_workers": {}, + "max_workers": {}, + "spot_bid_max_price": {}, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + NotificationSettings: &jobs.JobNotificationSettings{ + NoAlertForCanceledRuns: false, + NoAlertForSkippedRuns: false, + }, + Tasks: []jobs.Task{ + { + NewCluster: &compute.ClusterSpec{ + Autoscale: &compute.AutoScale{ + MinWorkers: 0, + MaxWorkers: 0, + }, + AzureAttributes: &compute.AzureAttributes{ + SpotBidMaxPrice: 0.0, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Initialize the variables. + err = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + return b.Config.InitializeVariables([]string{ + "no_alert_for_canceled_runs=true", + "no_alert_for_skipped_runs=true", + "min_workers=1", + "max_workers=2", + "spot_bid_max_price=0.5", + }) + }) + require.NoError(t, err) + + // Assign the variables to the dynamic configuration. + err = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + // Set the notification settings. + p = dyn.MustPathFromString("resources.jobs.job1.notification_settings") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("no_alert_for_canceled_runs")), dyn.V("${var.no_alert_for_canceled_runs}")) + require.NoError(t, err) + v, err = dyn.SetByPath(v, p.Append(dyn.Key("no_alert_for_skipped_runs")), dyn.V("${var.no_alert_for_skipped_runs}")) + require.NoError(t, err) + + // Set the min and max workers. + p = dyn.MustPathFromString("resources.jobs.job1.tasks[0].new_cluster.autoscale") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("min_workers")), dyn.V("${var.min_workers}")) + require.NoError(t, err) + v, err = dyn.SetByPath(v, p.Append(dyn.Key("max_workers")), dyn.V("${var.max_workers}")) + require.NoError(t, err) + + // Set the spot bid max price. + p = dyn.MustPathFromString("resources.jobs.job1.tasks[0].new_cluster.azure_attributes") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("spot_bid_max_price")), dyn.V("${var.spot_bid_max_price}")) + require.NoError(t, err) + + return v, nil + }) + }) + require.NoError(t, err) + + // Apply for the variable prefix. This should resolve the variables to their values. 
+ err = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables")) + require.NoError(t, err) + assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns) + assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns) + assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MinWorkers) + assert.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MaxWorkers) + assert.Equal(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice) +} diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index 6dcca2b85..4778edb96 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -6,6 +6,7 @@ import ( "slices" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" ) type fromTypedOptions int @@ -185,6 +186,11 @@ func fromTypedBool(src reflect.Value, ref dyn.Value, options ...fromTypedOptions return dyn.NilValue, nil } return dyn.V(src.Bool()), nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } } return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) @@ -205,6 +211,11 @@ func fromTypedInt(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) return dyn.NilValue, nil } return dyn.V(src.Int()), nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } } return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) @@ -225,6 +236,11 @@ func fromTypedFloat(src reflect.Value, ref dyn.Value, options ...fromTypedOption return dyn.NilValue, nil } return dyn.V(src.Float()), nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). 
+ if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } } return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index 5fc2b90f6..f7e97fc7e 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -495,6 +495,14 @@ func TestFromTypedBoolRetainsLocationsIfUnchanged(t *testing.T) { assert.Equal(t, dyn.NewValue(true, dyn.Location{File: "foo"}), nv) } +func TestFromTypedBoolVariableReference(t *testing.T) { + var src bool = true + var ref = dyn.V("${var.foo}") + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V("${var.foo}"), nv) +} + func TestFromTypedBoolTypeError(t *testing.T) { var src bool = true var ref = dyn.V("string") @@ -542,6 +550,14 @@ func TestFromTypedIntRetainsLocationsIfUnchanged(t *testing.T) { assert.Equal(t, dyn.NewValue(1234, dyn.Location{File: "foo"}), nv) } +func TestFromTypedIntVariableReference(t *testing.T) { + var src int = 1234 + var ref = dyn.V("${var.foo}") + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V("${var.foo}"), nv) +} + func TestFromTypedIntTypeError(t *testing.T) { var src int = 1234 var ref = dyn.V("string") @@ -589,6 +605,14 @@ func TestFromTypedFloatRetainsLocationsIfUnchanged(t *testing.T) { assert.Equal(t, dyn.NewValue(1.23, dyn.Location{File: "foo"}), nv) } +func TestFromTypedFloatVariableReference(t *testing.T) { + var src float64 = 1.23 + var ref = dyn.V("${var.foo}") + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V("${var.foo}"), nv) +} + func TestFromTypedFloatTypeError(t *testing.T) { var src float64 = 1.23 var ref = dyn.V("string") diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index e0dfbda23..d6539be95 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" ) // NormalizeOption is the type for options that can be passed to Normalize. @@ -245,6 +246,11 @@ func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Va case "false", "n", "N", "no", "No", "NO", "off", "Off", "OFF": out = false default: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } + // Cannot interpret as a boolean. return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src)) } @@ -266,6 +272,11 @@ func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Val var err error out, err = strconv.ParseInt(src.MustString(), 10, 64) if err != nil { + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ Severity: diag.Error, Summary: fmt.Sprintf("cannot parse %q as an integer", src.MustString()), @@ -290,6 +301,11 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.V var err error out, err = strconv.ParseFloat(src.MustString(), 64) if err != nil { + // Return verbatim if it's a pure variable reference. 
+ if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ Severity: diag.Error, Summary: fmt.Sprintf("cannot parse %q as a floating point number", src.MustString()), diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 82abc8260..a2a6038e4 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -490,6 +490,14 @@ func TestNormalizeBoolFromString(t *testing.T) { } } +func TestNormalizeBoolFromStringVariableReference(t *testing.T) { + var typ bool + vin := dyn.V("${var.foo}") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + func TestNormalizeBoolFromStringError(t *testing.T) { var typ bool vin := dyn.V("abc") @@ -542,6 +550,14 @@ func TestNormalizeIntFromString(t *testing.T) { assert.Equal(t, dyn.V(int64(123)), vout) } +func TestNormalizeIntFromStringVariableReference(t *testing.T) { + var typ int + vin := dyn.V("${var.foo}") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + func TestNormalizeIntFromStringError(t *testing.T) { var typ int vin := dyn.V("abc") @@ -594,6 +610,14 @@ func TestNormalizeFloatFromString(t *testing.T) { assert.Equal(t, dyn.V(1.2), vout) } +func TestNormalizeFloatFromStringVariableReference(t *testing.T) { + var typ float64 + vin := dyn.V("${var.foo}") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + func TestNormalizeFloatFromStringError(t *testing.T) { var typ float64 vin := dyn.V("abc") diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index 715d3f670..aeaaa9bea 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -6,6 +6,7 @@ import ( "strconv" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" ) func ToTyped(dst any, src dyn.Value) error { @@ -195,6 +196,11 @@ func toTypedBool(dst reflect.Value, src dyn.Value) error { dst.SetBool(false) return nil } + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -213,6 +219,11 @@ func toTypedInt(dst reflect.Value, src dyn.Value) error { dst.SetInt(i64) return nil } + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -231,6 +242,11 @@ func toTypedFloat(dst reflect.Value, src dyn.Value) error { dst.SetFloat(f64) return nil } + // Ignore pure variable references (e.g. ${var.foo}). 
+ if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index fd399b934..a7c4a6f08 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -355,10 +355,17 @@ func TestToTypedBoolFromString(t *testing.T) { } // Other - err := ToTyped(&out, dyn.V("${var.foo}")) + err := ToTyped(&out, dyn.V("some other string")) require.Error(t, err) } +func TestToTypedBoolFromStringVariableReference(t *testing.T) { + var out bool = true + err := ToTyped(&out, dyn.V("${var.foo}")) + require.NoError(t, err) + assert.Equal(t, false, out) +} + func TestToTypedInt(t *testing.T) { var out int err := ToTyped(&out, dyn.V(1234)) @@ -414,6 +421,13 @@ func TestToTypedIntFromStringInt(t *testing.T) { assert.Equal(t, int(123), out) } +func TestToTypedIntFromStringVariableReference(t *testing.T) { + var out int = 123 + err := ToTyped(&out, dyn.V("${var.foo}")) + require.NoError(t, err) + assert.Equal(t, int(0), out) +} + func TestToTypedFloat32(t *testing.T) { var out float32 err := ToTyped(&out, dyn.V(float32(1.0))) @@ -467,3 +481,17 @@ func TestToTypedFloat64FromString(t *testing.T) { require.NoError(t, err) assert.Equal(t, float64(1.2), out) } + +func TestToTypedFloat32FromStringVariableReference(t *testing.T) { + var out float32 = 1.0 + err := ToTyped(&out, dyn.V("${var.foo}")) + require.NoError(t, err) + assert.Equal(t, float32(0.0), out) +} + +func TestToTypedFloat64FromStringVariableReference(t *testing.T) { + var out float64 = 1.0 + err := ToTyped(&out, dyn.V("${var.foo}")) + require.NoError(t, err) + assert.Equal(t, float64(0.0), out) +} diff --git a/libs/dyn/dynvar/ref.go b/libs/dyn/dynvar/ref.go index e4616c520..a2047032a 100644 --- a/libs/dyn/dynvar/ref.go +++ b/libs/dyn/dynvar/ref.go @@ -67,3 +67,7 @@ func (v ref) references() []string { } return out } + +func IsPureVariableReference(s string) bool { + return len(s) > 0 && re.FindString(s) == s +} diff --git a/libs/dyn/dynvar/ref_test.go b/libs/dyn/dynvar/ref_test.go index b3066276c..092237368 100644 --- a/libs/dyn/dynvar/ref_test.go +++ b/libs/dyn/dynvar/ref_test.go @@ -44,3 +44,10 @@ func TestNewRefInvalidPattern(t *testing.T) { require.False(t, ok, "should not match invalid pattern: %s", v) } } + +func TestIsPureVariableReference(t *testing.T) { + assert.False(t, IsPureVariableReference("")) + assert.False(t, IsPureVariableReference("${foo.bar} suffix")) + assert.False(t, IsPureVariableReference("prefix ${foo.bar}")) + assert.True(t, IsPureVariableReference("${foo.bar}")) +} From 162b115e193765a3c922146ebfd7e448e64c2b50 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 19 Feb 2024 13:01:11 +0100 Subject: [PATCH 044/286] Add an experimental default-sql template (#1051) ## Changes This adds a `default-sql` template! In this latest revision, I've hidden the new template from the list so we can merge it, iterate over it, and properly release the template at the right time. 
- [x] WorkspaceFS support for .sql files is in prod - [x] SQL extension is preconfigured based on extension settings (if possible) - [ ] Streaming tables support is either ungated or the template provides instructions about signup - _Mitigation for now: this template is hidden from the list of templates._ - [x] Support non-UC workspaces ## Tests - [x] Unit tests - [x] Manual testing - [x] More manual testing - [x] Reviewer testing --------- Co-authored-by: Pieter Noordhuis Co-authored-by: PaulCornellDB --- cmd/bundle/init.go | 5 ++ libs/template/renderer_test.go | 20 +++++- .../dbt-sql/databricks_template_schema.json | 4 +- .../{{.project_name}}/databricks.yml.tmpl | 2 +- libs/template/templates/default-sql/README.md | 3 + .../databricks_template_schema.json | 53 ++++++++++++++ .../default-sql/library/versions.tmpl | 7 ++ .../{{.project_name}}/.vscode/extensions.json | 7 ++ .../.vscode/settings.json.tmpl | 28 ++++++++ .../template/{{.project_name}}/README.md.tmpl | 41 +++++++++++ .../{{.project_name}}/databricks.yml.tmpl | 71 +++++++++++++++++++ .../{{.project_name}}_sql_job.yml.tmpl | 43 +++++++++++ .../{{.project_name}}/scratch/README.md | 4 ++ .../scratch/exploration.ipynb.tmpl | 35 +++++++++ .../src/orders_daily.sql.tmpl | 14 ++++ .../{{.project_name}}/src/orders_raw.sql.tmpl | 16 +++++ 16 files changed, 349 insertions(+), 4 deletions(-) create mode 100644 libs/template/templates/default-sql/README.md create mode 100644 libs/template/templates/default-sql/databricks_template_schema.json create mode 100644 libs/template/templates/default-sql/library/versions.tmpl create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/.vscode/extensions.json create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/resources/{{.project_name}}_sql_job.yml.tmpl create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/scratch/README.md create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/scratch/exploration.ipynb.tmpl create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl create mode 100644 libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 306e29038..704bad64d 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -35,6 +35,11 @@ var nativeTemplates = []nativeTemplate{ name: "default-python", description: "The default Python template for Notebooks / Delta Live Tables / Workflows", }, + { + name: "default-sql", + description: "The default SQL template for .sql files that run with Databricks SQL", + hidden: true, + }, { name: "dbt-sql", description: "The dbt SQL template (https://www.databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)", diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 964159ec2..dc287440c 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -109,9 +109,9 @@ func TestBuiltinPythonTemplateValid(t *testing.T) { // Test option combinations options := []string{"yes", "no"} isServicePrincipal := false - build := false catalog := "hive_metastore" 
cachedCatalog = &catalog + build := false for _, includeNotebook := range options { for _, includeDlt := range options { for _, includePython := range options { @@ -149,6 +149,24 @@ func TestBuiltinPythonTemplateValid(t *testing.T) { defer os.RemoveAll(tempDir) } +func TestBuiltinSQLTemplateValid(t *testing.T) { + for _, personal_schemas := range []string{"yes", "no"} { + for _, target := range []string{"dev", "prod"} { + for _, isServicePrincipal := range []bool{true, false} { + config := map[string]any{ + "project_name": "my_project", + "http_path": "/sql/1.0/warehouses/123abc", + "default_catalog": "users", + "shared_schema": "lennart", + "personal_schemas": personal_schemas, + } + build := false + assertBuiltinTemplateValid(t, "default-sql", config, target, isServicePrincipal, build, t.TempDir()) + } + } + } +} + func TestBuiltinDbtTemplateValid(t *testing.T) { for _, personal_schemas := range []string{"yes", "no"} { for _, target := range []string{"dev", "prod"} { diff --git a/libs/template/templates/dbt-sql/databricks_template_schema.json b/libs/template/templates/dbt-sql/databricks_template_schema.json index 736b12325..7b39f6187 100644 --- a/libs/template/templates/dbt-sql/databricks_template_schema.json +++ b/libs/template/templates/dbt-sql/databricks_template_schema.json @@ -3,8 +3,8 @@ "properties": { "project_name": { "type": "string", - "pattern": "^[A-Za-z_][A-Za-z0-9_]+$", - "pattern_match_failure_message": "Name must consist of letters, numbers, and underscores.", + "pattern": "^[A-Za-z_][A-Za-z0-9-_]+$", + "pattern_match_failure_message": "Name must consist of letters, numbers, dashes, and underscores.", "default": "dbt_project", "description": "\nPlease provide a unique name for this project.\nproject_name", "order": 1 diff --git a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl index ea432f8db..e3572326b 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl @@ -19,7 +19,7 @@ targets: host: {{workspace_host}} ## Optionally, there could be a 'staging' target here. - ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/index.html.) + ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.) # # staging: # workspace: diff --git a/libs/template/templates/default-sql/README.md b/libs/template/templates/default-sql/README.md new file mode 100644 index 000000000..6b7140f07 --- /dev/null +++ b/libs/template/templates/default-sql/README.md @@ -0,0 +1,3 @@ +# sql template + +This folder provides a template for using SQL with Databricks Asset Bundles. 
diff --git a/libs/template/templates/default-sql/databricks_template_schema.json b/libs/template/templates/default-sql/databricks_template_schema.json new file mode 100644 index 000000000..b7a42e198 --- /dev/null +++ b/libs/template/templates/default-sql/databricks_template_schema.json @@ -0,0 +1,53 @@ +{ + "welcome_message": "\nWelcome to the (EXPERIMENTAL) default SQL template for Databricks Asset Bundles!", + "properties": { + "project_name": { + "type": "string", + "default": "sql_project", + "description": "\nPlease provide a unique name for this project.\nproject_name", + "order": 1, + "pattern": "^[A-Za-z_][A-Za-z0-9-_]+$", + "pattern_match_failure_message": "Name must consist of letters, numbers, dashes, and underscores." + }, + "http_path": { + "type": "string", + "pattern": "^/sql/.\\../warehouses/[a-z0-9]+$", + "pattern_match_failure_message": "Path must be of the form /sql/1.0/warehouses/", + "description": "\nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", + "order": 2 + }, + "default_catalog": { + "type": "string", + "default": "{{if eq (default_catalog) \"\"}}hive_metastore{{else}}{{default_catalog}}{{end}}", + "pattern": "^\\w*$", + "pattern_match_failure_message": "Invalid catalog name.", + "description": "\nPlease provide an initial catalog{{if eq (default_catalog) \"\"}} or metastore{{end}}.\ndefault_catalog", + "order": 3 + }, + "personal_schemas": { + "type": "string", + "description": "\nWould you like to use a personal schema for each user working on this project? (e.g., 'catalog.{{short_name}}')\npersonal_schemas", + "enum": [ + "yes, automatically use a schema based on the current user name during development", + "no, use a single schema for all users during development" + ], + "order": 4 + }, + "shared_schema": { + "skip_prompt_if": { + "properties": { + "personal_schemas": { + "const": "yes, automatically use a schema based on the current user name during development" + } + } + }, + "type": "string", + "default": "default", + "pattern": "^\\w+$", + "pattern_match_failure_message": "Invalid schema name.", + "description": "\nPlease provide an initial schema during development.\ndefault_schema", + "order": 5 + } + }, + "success_message": "\n✨ Your new project has been created in the '{{.project_name}}' directory!\n\nPlease refer to the README.md file for \"getting started\" instructions.\nSee also the documentation at https://docs.databricks.com/dev-tools/bundles/index.html." 
+} diff --git a/libs/template/templates/default-sql/library/versions.tmpl b/libs/template/templates/default-sql/library/versions.tmpl new file mode 100644 index 000000000..f9a879d25 --- /dev/null +++ b/libs/template/templates/default-sql/library/versions.tmpl @@ -0,0 +1,7 @@ +{{define "latest_lts_dbr_version" -}} + 13.3.x-scala2.12 +{{- end}} + +{{define "latest_lts_db_connect_version_spec" -}} + >=13.3,<13.4 +{{- end}} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/extensions.json b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/extensions.json new file mode 100644 index 000000000..8e1023465 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/extensions.json @@ -0,0 +1,7 @@ +{ + "recommendations": [ + "databricks.databricks", + "redhat.vscode-yaml", + "databricks.sqltools-databricks-driver", + ] +} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl new file mode 100644 index 000000000..c63af24b4 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -0,0 +1,28 @@ +{ + "python.analysis.stubPath": ".vscode", + "databricks.python.envFile": "${workspaceFolder}/.env", + "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", + "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", + "python.testing.pytestArgs": [ + "." + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "python.analysis.extraPaths": ["src"], + "files.exclude": { + "**/*.egg-info": true, + "**/__pycache__": true, + ".pytest_cache": true, + }, + "sqltools.connections": [ + { + "connectionMethod": "VS Code Extension (beta)", + "catalog": "{{.default_catalog}}", + "previewLimit": 50, + "driver": "Databricks", + "name": "databricks", + "path": "{{.http_path}}" + } + ], + "sqltools.autoConnectTo": "", +} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl new file mode 100644 index 000000000..e5c44320d --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl @@ -0,0 +1,41 @@ +# {{.project_name}} + +The '{{.project_name}}' project was generated by using the default-sql template. + +## Getting started + +1. Install the Databricks CLI from https://docs.databricks.com/dev-tools/cli/install.html + +2. Authenticate to your Databricks workspace (if you have not done so already): + ``` + $ databricks configure + ``` + +3. To deploy a development copy of this project, type: + ``` + $ databricks bundle deploy --target dev + ``` + (Note that "dev" is the default target, so the `--target` parameter + is optional here.) + + This deploys everything that's defined for this project. + For example, the default template would deploy a job called + `[dev yourname] {{.project_name}}_job` to your workspace. + You can find that job by opening your workpace and clicking on **Workflows**. + +4. Similarly, to deploy a production copy, type: + ``` + $ databricks bundle deploy --target prod + ``` + +5. To run a job, use the "run" command: + ``` + $ databricks bundle run + ``` + +6. 
Optionally, install developer tools such as the Databricks extension for Visual Studio Code from + https://docs.databricks.com/dev-tools/vscode-ext.html. + +7. For documentation on the Databricks Asset Bundles format used + for this project, and for CI/CD configuration, see + https://docs.databricks.com/dev-tools/bundles/index.html. diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl new file mode 100644 index 000000000..a47fb7c19 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl @@ -0,0 +1,71 @@ +# This is a Databricks asset bundle definition for {{.project_name}}. +# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation. +bundle: + name: {{.project_name}} + +include: + - resources/*.yml + +# Variable declarations. These variables are assigned in the dev/prod targets below. +variables: + warehouse_id: + description: The warehouse to use + catalog: + description: The catalog to use + schema: + description: The schema to use + +{{- $dev_schema := .shared_schema }} +{{- $prod_schema := .shared_schema }} +{{- if (regexp "^yes").MatchString .personal_schemas}} +{{- $dev_schema = "${workspace.current_user.short_name}"}} +{{- $prod_schema = "default"}} +{{- end}} + +# Deployment targets. +targets: + # The 'dev' target, for development purposes. This target is the default. + dev: + # We use 'mode: development' to indicate this is a personal development copy. + # Any job schedules and triggers are paused by default + mode: development + default: true + workspace: + host: {{workspace_host}} + variables: + warehouse_id: {{index ((regexp "[^/]+$").FindStringSubmatch .http_path) 0}} + catalog: {{.default_catalog}} + schema: {{$dev_schema}} + + ## Optionally, there could be a 'staging' target here. + ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.) + # + # staging: + # workspace: + # host: {{workspace_host}} + + # The 'prod' target, used for production deployment. + prod: + # We use 'mode: production' to indicate this is a production deployment. + # Doing so enables strict verification of the settings below. + mode: production + workspace: + host: {{workspace_host}} + # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy. + {{- /* + Internal note 2023-12: CLI versions v0.211.0 and before would show an error when using `mode: production` + with a path that doesn't say "/Shared". For now, we'll include an extra comment in the template + to explain that customers should update if they see this. + */}} + # If this path results in an error, please make sure you have a recent version of the CLI installed. + root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target} + variables: + warehouse_id: {{index ((regexp "[^/]+$").FindStringSubmatch .http_path) 0}} + catalog: {{.default_catalog}} + schema: {{$prod_schema}} + {{- if not is_service_principal}} + run_as: + # This runs as {{user_name}} in production. We could also use a service principal here + # using service_principal_name (see https://docs.databricks.com/en/dev-tools/bundles/permissions.html). 
+ user_name: {{user_name}} + {{end -}} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/resources/{{.project_name}}_sql_job.yml.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/resources/{{.project_name}}_sql_job.yml.tmpl new file mode 100644 index 000000000..31d2d21a9 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/resources/{{.project_name}}_sql_job.yml.tmpl @@ -0,0 +1,43 @@ +# A job running SQL queries on a SQL warehouse +resources: + jobs: + {{.project_name}}_sql_job: + name: {{.project_name}}_sql_job + + schedule: + # Run every day at 7:17 AM + quartz_cron_expression: '44 17 7 * * ?' + timezone_id: Europe/Amsterdam + + {{- if not is_service_principal}} + + email_notifications: + on_failure: + - {{user_name}} + + {{else}} + + {{end -}} + + parameters: + - name: catalog + default: ${var.catalog} + - name: schema + default: ${var.schema} + - name: bundle_target + default: ${bundle.target} + + tasks: + - task_key: orders_raw + sql_task: + warehouse_id: ${var.warehouse_id} + file: + path: ../src/orders_raw.sql + + - task_key: orders_daily + depends_on: + - task_key: orders_raw + sql_task: + warehouse_id: ${var.warehouse_id} + file: + path: ../src/orders_daily.sql diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/scratch/README.md b/libs/template/templates/default-sql/template/{{.project_name}}/scratch/README.md new file mode 100644 index 000000000..5350d09cf --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/scratch/README.md @@ -0,0 +1,4 @@ +# scratch + +This folder is reserved for personal, exploratory notebooks and SQL files. +By default these are not committed to Git, as 'scratch' is listed in .gitignore. diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/scratch/exploration.ipynb.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/scratch/exploration.ipynb.tmpl new file mode 100644 index 000000000..becee5fba --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/scratch/exploration.ipynb.tmpl @@ -0,0 +1,35 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "dc8c630c-1ea0-42e4-873f-e4dec4d3d416", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "%sql\n", + "SELECT * FROM json.`/databricks-datasets/nyctaxi/sample/json/`" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 2 + }, + "notebookName": "exploration", + "widgets": {} + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl new file mode 100644 index 000000000..76ecadd3e --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl @@ -0,0 +1,14 @@ +-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml) +{{- /* We can't use a materialized view here since they don't support 'create or refresh yet.*/}} + +CREATE OR REPLACE VIEW + IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_daily')) +AS SELECT + order_date, count(*) AS number_of_orders +FROM 
+ IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_raw')) + +-- During development, only process a smaller range of data +WHERE {{"{{"}}bundle_target{{"}}"}} == "prod" OR (order_date >= '2019-08-01' AND order_date < '2019-09-01') + +GROUP BY order_date diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl new file mode 100644 index 000000000..96769062b --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl @@ -0,0 +1,16 @@ +-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml) +-- +-- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/ +-- See also https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-streaming-table.html + +CREATE OR REFRESH STREAMING TABLE + IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_raw')) +AS SELECT + customer_name, + DATE(TIMESTAMP(FROM_UNIXTIME(TRY_CAST(order_datetime AS BIGINT)))) AS order_date, + order_number +FROM STREAM READ_FILES( + "/databricks-datasets/retail-org/sales_orders/", + format => "json", + header => true +) From d9f34e6b2270a21e72147909bfbc38b56c8db4b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 14:30:06 +0000 Subject: [PATCH 045/286] Bump github.com/databricks/databricks-sdk-go from 0.32.0 to 0.33.0 (#1222) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.32.0 to 0.33.0.
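For context, this bump is equivalent to what one would run manually with the Go toolchain (a sketch; Dependabot generates the `go.mod`/`go.sum` change automatically):

```
go get github.com/databricks/databricks-sdk-go@v0.33.0
go mod tidy
```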
Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog.

0.33.0

Internal Changes:

  • Add helper function to get header fields (#822).
  • Add Int64 to header type injection (#819).

API Changes:

OpenAPI SHA: cdd76a98a4fca7008572b3a94427566dd286c63b, Date: 2024-02-19

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + bundle/schema/docs/bundle_descriptions.json | 394 ++++++++++-------- cmd/workspace/cmd.go | 2 + .../lakehouse-monitors/lakehouse-monitors.go | 12 +- cmd/workspace/online-tables/online-tables.go | 238 +++++++++++ go.mod | 2 +- go.sum | 4 +- 8 files changed, 470 insertions(+), 185 deletions(-) create mode 100755 cmd/workspace/online-tables/online-tables.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index bf3a5ea97..013e5ffe8 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -c40670f5a2055c92cf0a6aac92a5bccebfb80866 \ No newline at end of file +cdd76a98a4fca7008572b3a94427566dd286c63b \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index fe33227a7..09aac5e75 100755 --- a/.gitattributes +++ b/.gitattributes @@ -56,6 +56,7 @@ cmd/workspace/libraries/libraries.go linguist-generated=true cmd/workspace/metastores/metastores.go linguist-generated=true cmd/workspace/model-registry/model-registry.go linguist-generated=true cmd/workspace/model-versions/model-versions.go linguist-generated=true +cmd/workspace/online-tables/online-tables.go linguist-generated=true cmd/workspace/permissions/permissions.go linguist-generated=true cmd/workspace/pipelines/pipelines.go linguist-generated=true cmd/workspace/policy-families/policy-families.go linguist-generated=true diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 228f6e13f..5b63bb6d2 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -322,7 +322,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -785,7 +785,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. 
If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -930,7 +930,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -1269,7 +1269,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" } } }, @@ -1371,7 +1371,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" } } }, @@ -1449,7 +1449,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." 
}, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" } } }, @@ -1672,97 +1672,92 @@ "external_model": { "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n", "properties": { - "config": { - "description": "The config for the external model, which must match the provider.", + "ai21labs_config": { + "description": "AI21Labs Config. Only required if the provider is 'ai21labs'.", "properties": { - "ai21labs_config": { - "description": "AI21Labs Config", - "properties": { - "ai21labs_api_key": { - "description": "The Databricks secret key reference for an AI21Labs API key." - } - } + "ai21labs_api_key": { + "description": "The Databricks secret key reference for an AI21Labs API key." + } + } + }, + "anthropic_config": { + "description": "Anthropic Config. Only required if the provider is 'anthropic'.", + "properties": { + "anthropic_api_key": { + "description": "The Databricks secret key reference for an Anthropic API key." + } + } + }, + "aws_bedrock_config": { + "description": "AWS Bedrock Config. Only required if the provider is 'aws-bedrock'.", + "properties": { + "aws_access_key_id": { + "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." }, - "anthropic_config": { - "description": "Anthropic Config", - "properties": { - "anthropic_api_key": { - "description": "The Databricks secret key reference for an Anthropic API key." - } - } + "aws_region": { + "description": "The AWS region to use. Bedrock has to be enabled there." }, - "aws_bedrock_config": { - "description": "AWS Bedrock Config", - "properties": { - "aws_access_key_id": { - "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." - }, - "aws_region": { - "description": "The AWS region to use. Bedrock has to be enabled there." 
- }, - "aws_secret_access_key": { - "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." - }, - "bedrock_provider": { - "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." - } - } + "aws_secret_access_key": { + "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." }, - "cohere_config": { - "description": "Cohere Config", - "properties": { - "cohere_api_key": { - "description": "The Databricks secret key reference for a Cohere API key." - } - } + "bedrock_provider": { + "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + } + } + }, + "cohere_config": { + "description": "Cohere Config. Only required if the provider is 'cohere'.", + "properties": { + "cohere_api_key": { + "description": "The Databricks secret key reference for a Cohere API key." + } + } + }, + "databricks_model_serving_config": { + "description": "Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'.", + "properties": { + "databricks_api_token": { + "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" }, - "databricks_model_serving_config": { - "description": "Databricks Model Serving Config", - "properties": { - "databricks_api_token": { - "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" - }, - "databricks_workspace_url": { - "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" - } - } - }, - "openai_config": { - "description": "OpenAI Config", - "properties": { - "openai_api_base": { - "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" - }, - "openai_api_key": { - "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." - }, - "openai_api_type": { - "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. 
For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" - }, - "openai_api_version": { - "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" - }, - "openai_deployment_name": { - "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" - }, - "openai_organization": { - "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" - } - } - }, - "palm_config": { - "description": "PaLM Config", - "properties": { - "palm_api_key": { - "description": "The Databricks secret key reference for a PaLM API key." - } - } + "databricks_workspace_url": { + "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" } } }, "name": { "description": "The name of the external model." }, + "openai_config": { + "description": "OpenAI Config. Only required if the provider is 'openai'.", + "properties": { + "openai_api_base": { + "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" + }, + "openai_api_key": { + "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." + }, + "openai_api_type": { + "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" + }, + "openai_api_version": { + "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" + }, + "openai_deployment_name": { + "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" + }, + "openai_organization": { + "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" + } + } + }, + "palm_config": { + "description": "PaLM Config. Only required if the provider is 'palm'.", + "properties": { + "palm_api_key": { + "description": "The Databricks secret key reference for a PaLM API key." + } + } + }, "provider": { "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" }, @@ -1774,6 +1769,12 @@ "instance_profile_arn": { "description": "ARN of the instance profile that the served entity uses to access AWS resources." }, + "max_provisioned_throughput": { + "description": "The maximum tokens per second that the endpoint can scale up to." + }, + "min_provisioned_throughput": { + "description": "The minimum tokens per second that the endpoint can scale down to." + }, "name": { "description": "The name of a served entity. It must be unique across an endpoint. 
A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n" }, @@ -2854,7 +2855,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -3317,7 +3318,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3462,7 +3463,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -3801,7 +3802,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" } } }, @@ -3903,7 +3904,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" } } }, @@ -3981,7 +3982,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" } } }, @@ -4204,97 +4205,92 @@ "external_model": { "description": "The external model to be served. 
NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n", "properties": { - "config": { - "description": "The config for the external model, which must match the provider.", + "ai21labs_config": { + "description": "AI21Labs Config. Only required if the provider is 'ai21labs'.", "properties": { - "ai21labs_config": { - "description": "AI21Labs Config", - "properties": { - "ai21labs_api_key": { - "description": "The Databricks secret key reference for an AI21Labs API key." - } - } + "ai21labs_api_key": { + "description": "The Databricks secret key reference for an AI21Labs API key." + } + } + }, + "anthropic_config": { + "description": "Anthropic Config. Only required if the provider is 'anthropic'.", + "properties": { + "anthropic_api_key": { + "description": "The Databricks secret key reference for an Anthropic API key." + } + } + }, + "aws_bedrock_config": { + "description": "AWS Bedrock Config. Only required if the provider is 'aws-bedrock'.", + "properties": { + "aws_access_key_id": { + "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." }, - "anthropic_config": { - "description": "Anthropic Config", - "properties": { - "anthropic_api_key": { - "description": "The Databricks secret key reference for an Anthropic API key." - } - } + "aws_region": { + "description": "The AWS region to use. Bedrock has to be enabled there." }, - "aws_bedrock_config": { - "description": "AWS Bedrock Config", - "properties": { - "aws_access_key_id": { - "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." - }, - "aws_region": { - "description": "The AWS region to use. Bedrock has to be enabled there." - }, - "aws_secret_access_key": { - "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." - }, - "bedrock_provider": { - "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." - } - } + "aws_secret_access_key": { + "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." }, - "cohere_config": { - "description": "Cohere Config", - "properties": { - "cohere_api_key": { - "description": "The Databricks secret key reference for a Cohere API key." - } - } + "bedrock_provider": { + "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + } + } + }, + "cohere_config": { + "description": "Cohere Config. Only required if the provider is 'cohere'.", + "properties": { + "cohere_api_key": { + "description": "The Databricks secret key reference for a Cohere API key." + } + } + }, + "databricks_model_serving_config": { + "description": "Databricks Model Serving Config. 
Only required if the provider is 'databricks-model-serving'.", + "properties": { + "databricks_api_token": { + "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" }, - "databricks_model_serving_config": { - "description": "Databricks Model Serving Config", - "properties": { - "databricks_api_token": { - "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" - }, - "databricks_workspace_url": { - "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" - } - } - }, - "openai_config": { - "description": "OpenAI Config", - "properties": { - "openai_api_base": { - "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" - }, - "openai_api_key": { - "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." - }, - "openai_api_type": { - "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" - }, - "openai_api_version": { - "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" - }, - "openai_deployment_name": { - "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" - }, - "openai_organization": { - "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" - } - } - }, - "palm_config": { - "description": "PaLM Config", - "properties": { - "palm_api_key": { - "description": "The Databricks secret key reference for a PaLM API key." - } - } + "databricks_workspace_url": { + "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" } } }, "name": { "description": "The name of the external model." }, + "openai_config": { + "description": "OpenAI Config. Only required if the provider is 'openai'.", + "properties": { + "openai_api_base": { + "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" + }, + "openai_api_key": { + "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." + }, + "openai_api_type": { + "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. 
For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" + }, + "openai_api_version": { + "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" + }, + "openai_deployment_name": { + "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" + }, + "openai_organization": { + "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" + } + } + }, + "palm_config": { + "description": "PaLM Config. Only required if the provider is 'palm'.", + "properties": { + "palm_api_key": { + "description": "The Databricks secret key reference for a PaLM API key." + } + } + }, "provider": { "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" }, @@ -4306,6 +4302,12 @@ "instance_profile_arn": { "description": "ARN of the instance profile that the served entity uses to access AWS resources." }, + "max_provisioned_throughput": { + "description": "The maximum tokens per second that the endpoint can scale up to." + }, + "min_provisioned_throughput": { + "description": "The minimum tokens per second that the endpoint can scale down to." + }, "name": { "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n" }, @@ -5063,7 +5065,53 @@ "variables": { "description": "", "additionalproperties": { - "description": "" + "description": "", + "properties": { + "default": { + "description": "" + }, + "description": { + "description": "" + }, + "lookup": { + "description": "", + "properties": { + "alert": { + "description": "" + }, + "cluster": { + "description": "" + }, + "cluster_policy": { + "description": "" + }, + "dashboard": { + "description": "" + }, + "instance_pool": { + "description": "" + }, + "job": { + "description": "" + }, + "metastore": { + "description": "" + }, + "pipeline": { + "description": "" + }, + "query": { + "description": "" + }, + "service_principal": { + "description": "" + }, + "warehouse": { + "description": "" + } + } + } + } } }, "workspace": { diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 47ad795e6..e365be7d1 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -33,6 +33,7 @@ import ( metastores "github.com/databricks/cli/cmd/workspace/metastores" model_registry "github.com/databricks/cli/cmd/workspace/model-registry" model_versions "github.com/databricks/cli/cmd/workspace/model-versions" + online_tables "github.com/databricks/cli/cmd/workspace/online-tables" permissions "github.com/databricks/cli/cmd/workspace/permissions" pipelines "github.com/databricks/cli/cmd/workspace/pipelines" policy_families "github.com/databricks/cli/cmd/workspace/policy-families" @@ -100,6 +101,7 @@ func All() []*cobra.Command { out = append(out, metastores.New()) out = append(out, model_registry.New()) out = append(out, model_versions.New()) + out = append(out, online_tables.New()) out = append(out, 
permissions.New()) out = append(out, pipelines.New()) out = append(out, policy_families.New()) diff --git a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go index 518e97c45..13383f36f 100755 --- a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go +++ b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go @@ -631,7 +631,7 @@ func newUpdate() *cobra.Command { // TODO: output-only field // TODO: complex arg: time_series - cmd.Use = "update FULL_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME" + cmd.Use = "update FULL_NAME OUTPUT_SCHEMA_NAME" cmd.Short = `Update a table monitor.` cmd.Long = `Update a table monitor. @@ -651,7 +651,6 @@ func newUpdate() *cobra.Command { Arguments: FULL_NAME: Full name of the table. - ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables). OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` cmd.Annotations = make(map[string]string) @@ -660,11 +659,11 @@ func newUpdate() *cobra.Command { if cmd.Flags().Changed("json") { err := cobra.ExactArgs(1)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. Provide 'assets_dir', 'output_schema_name' in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. Provide 'output_schema_name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := cobra.ExactArgs(2) return check(cmd, args) } @@ -681,10 +680,7 @@ func newUpdate() *cobra.Command { } updateReq.FullName = args[0] if !cmd.Flags().Changed("json") { - updateReq.AssetsDir = args[1] - } - if !cmd.Flags().Changed("json") { - updateReq.OutputSchemaName = args[2] + updateReq.OutputSchemaName = args[1] } response, err := w.LakehouseMonitors.Update(ctx, updateReq) diff --git a/cmd/workspace/online-tables/online-tables.go b/cmd/workspace/online-tables/online-tables.go new file mode 100755 index 000000000..d97c52837 --- /dev/null +++ b/cmd/workspace/online-tables/online-tables.go @@ -0,0 +1,238 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package online_tables + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "online-tables", + Short: `Online tables provide lower latency and higher QPS access to data from Delta tables.`, + Long: `Online tables provide lower latency and higher QPS access to data from Delta + tables.`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + } + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createOverrides []func( + *cobra.Command, + *catalog.ViewData, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq catalog.ViewData + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Full three-part (catalog, schema, table) name of the table.`) + // TODO: complex arg: spec + + cmd.Use = "create" + cmd.Short = `Create an Online Table.` + cmd.Long = `Create an Online Table. + + Create a new Online Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + + response, err := w.OnlineTables.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newCreate()) + }) +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *catalog.DeleteOnlineTableRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq catalog.DeleteOnlineTableRequest + + // TODO: short flags + + cmd.Use = "delete NAME" + cmd.Short = `Delete an Online Table.` + cmd.Long = `Delete an Online Table. + + Delete an online table. Warning: This will delete all the data in the online + table. If the source Delta table was deleted or modified since this Online + Table was created, this will lose the data forever! + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Name = args[0] + + err = w.OnlineTables.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newDelete()) + }) +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *catalog.GetOnlineTableRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq catalog.GetOnlineTableRequest + + // TODO: short flags + + cmd.Use = "get NAME" + cmd.Short = `Get an Online Table.` + cmd.Long = `Get an Online Table. + + Get information about an existing online table and its status. + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Name = args[0] + + response, err := w.OnlineTables.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +func init() { + cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { + cmd.AddCommand(newGet()) + }) +} + +// end service OnlineTables diff --git a/go.mod b/go.mod index 9fd37e6e0..dc01266cb 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.32.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.33.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 3826f15da..bbab6fc34 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.32.0 h1:H6SQmfOOXd6x2fOp+zISkcR1nzJ7NTXXmIv8lWyK66Y= -github.com/databricks/databricks-sdk-go v0.32.0/go.mod h1:yyXGdhEfXBBsIoTm0mdl8QN0xzCQPUVZTozMM/7wVuI= +github.com/databricks/databricks-sdk-go v0.33.0 h1:0ldeP8aPnpKLV/mvNKsOVijOaLLo6TxRGdIwrEf2rlQ= +github.com/databricks/databricks-sdk-go v0.33.0/go.mod h1:yyXGdhEfXBBsIoTm0mdl8QN0xzCQPUVZTozMM/7wVuI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 5ba0aaa5c5ca9074cadcec61370abd76714b0ecf Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 20 Feb 2024 21:44:37 +0530 Subject: [PATCH 046/286] Add support for UC Volumes to the `databricks fs` commands (#1209) ## Changes ``` shreyas.goenka@THW32HFW6T cli % databricks fs -h Commands to do file system operations on DBFS and UC Volumes. Usage: databricks fs [command] Available Commands: cat Show file content. cp Copy files and directories. ls Lists files. mkdir Make directories. rm Remove files and directories. 
``` This PR adds support for UC Volumes to the fs commands. The fs commands for UC volumes work the same as they currently do for DBFS. This is ensured by running the same test matrix we across both DBFS and UC Volumes versions of the fs commands. ## Tests Support for UC volumes is tested by running the same tests as we did originally for DBFS commands. The tests require a `main` catalog to exist in the workspace, which does in our test workspaces environments which have the `TEST_METASTORE_ID` environment variable set. For the Files API filer, we do the same by running mostly common tests to ensure the filers for "local", "wsfs", "dbfs" and "files API" are consistent. The tests are also made to all run in parallel to reduce the time taken. To ensure the separation of the tests, each test creates its own UC schema (for UC volumes tests) or DBFS directories (for DBFS tests). --- cmd/fs/cat.go | 4 +- cmd/fs/cp.go | 9 +- cmd/fs/fs.go | 2 +- cmd/fs/ls.go | 4 +- cmd/fs/mkdir.go | 4 +- cmd/fs/rm.go | 4 +- internal/filer_test.go | 322 +++++++++++++-------------------- internal/fs_cat_test.go | 68 ++++--- internal/fs_cp_test.go | 359 ++++++++++++++++++++++--------------- internal/fs_ls_test.go | 236 +++++++++++++----------- internal/fs_mkdir_test.go | 165 +++++++++-------- internal/fs_rm_test.go | 203 +++++++++++---------- internal/helpers.go | 98 ++++++++++ libs/filer/files_client.go | 331 +++++++++++++++++++++++++++++++--- 14 files changed, 1138 insertions(+), 671 deletions(-) diff --git a/cmd/fs/cat.go b/cmd/fs/cat.go index 8227cd781..be1866538 100644 --- a/cmd/fs/cat.go +++ b/cmd/fs/cat.go @@ -9,8 +9,8 @@ import ( func newCatCommand() *cobra.Command { cmd := &cobra.Command{ Use: "cat FILE_PATH", - Short: "Show file content", - Long: `Show the contents of a file.`, + Short: "Show file content.", + Long: `Show the contents of a file in DBFS or a UC Volume.`, Args: cobra.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/fs/cp.go b/cmd/fs/cp.go index 97fceb93c..f0f480fec 100644 --- a/cmd/fs/cp.go +++ b/cmd/fs/cp.go @@ -129,10 +129,10 @@ func (c *copy) emitFileCopiedEvent(sourcePath, targetPath string) error { func newCpCommand() *cobra.Command { cmd := &cobra.Command{ Use: "cp SOURCE_PATH TARGET_PATH", - Short: "Copy files and directories to and from DBFS.", - Long: `Copy files to and from DBFS. + Short: "Copy files and directories.", + Long: `Copy files and directories to and from any paths on DBFS, UC Volumes or your local filesystem. - For paths in DBFS it is required that you specify the "dbfs" scheme. + For paths in DBFS and UC Volumes, it is required that you specify the "dbfs" scheme. For example: dbfs:/foo/bar. 
Recursively copying a directory will copy all files inside directory @@ -152,9 +152,6 @@ func newCpCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - // TODO: Error if a user uses '\' as path separator on windows when "file" - // scheme is specified (https://github.com/databricks/cli/issues/485) - // Get source filer and source path without scheme fullSourcePath := args[0] sourceFiler, sourcePath, err := filerForPath(ctx, fullSourcePath) diff --git a/cmd/fs/fs.go b/cmd/fs/fs.go index 01d8a745b..1f36696a6 100644 --- a/cmd/fs/fs.go +++ b/cmd/fs/fs.go @@ -8,7 +8,7 @@ func New() *cobra.Command { cmd := &cobra.Command{ Use: "fs", Short: "Filesystem related commands", - Long: `Commands to do DBFS operations.`, + Long: `Commands to do file system operations on DBFS and UC Volumes.`, GroupID: "workspace", } diff --git a/cmd/fs/ls.go b/cmd/fs/ls.go index 7ae55e1f4..be52b9289 100644 --- a/cmd/fs/ls.go +++ b/cmd/fs/ls.go @@ -40,8 +40,8 @@ func toJsonDirEntry(f fs.DirEntry, baseDir string, isAbsolute bool) (*jsonDirEnt func newLsCommand() *cobra.Command { cmd := &cobra.Command{ Use: "ls DIR_PATH", - Short: "Lists files", - Long: `Lists files`, + Short: "Lists files.", + Long: `Lists files in DBFS and UC Volumes.`, Args: cobra.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/fs/mkdir.go b/cmd/fs/mkdir.go index c6a5e607c..dc054d8a7 100644 --- a/cmd/fs/mkdir.go +++ b/cmd/fs/mkdir.go @@ -11,8 +11,8 @@ func newMkdirCommand() *cobra.Command { // Alias `mkdirs` for this command exists for legacy purposes. This command // is called databricks fs mkdirs in our legacy CLI: https://github.com/databricks/databricks-cli Aliases: []string{"mkdirs"}, - Short: "Make directories", - Long: `Mkdir will create directories along the path to the argument directory.`, + Short: "Make directories.", + Long: `Make directories in DBFS and UC Volumes. Mkdir will create directories along the path to the argument directory.`, Args: cobra.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/fs/rm.go b/cmd/fs/rm.go index 3ce8d3b93..8a7b6571d 100644 --- a/cmd/fs/rm.go +++ b/cmd/fs/rm.go @@ -9,8 +9,8 @@ import ( func newRmCommand() *cobra.Command { cmd := &cobra.Command{ Use: "rm PATH", - Short: "Remove files and directories from dbfs.", - Long: `Remove files and directories from dbfs.`, + Short: "Remove files and directories.", + Long: `Remove files and directories from DBFS and UC Volumes.`, Args: cobra.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/internal/filer_test.go b/internal/filer_test.go index b1af6886c..d333a1b70 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -6,14 +6,11 @@ import ( "errors" "io" "io/fs" - "net/http" "regexp" "strings" "testing" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -40,15 +37,87 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str assert.Equal(f, contents, body.String()) } -func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { +func commonFilerRecursiveDeleteTest(t *testing.T, ctx context.Context, f filer.Filer) { var err error - // Write should fail because the root path doesn't yet exist. 
+ err = f.Write(ctx, "dir/file1", strings.NewReader("content1"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/file1", `content1`) + + err = f.Write(ctx, "dir/file2", strings.NewReader("content2"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/file2", `content2`) + + err = f.Write(ctx, "dir/subdir1/file3", strings.NewReader("content3"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/subdir1/file3", `content3`) + + err = f.Write(ctx, "dir/subdir1/file4", strings.NewReader("content4"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/subdir1/file4", `content4`) + + err = f.Write(ctx, "dir/subdir2/file5", strings.NewReader("content5"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/subdir2/file5", `content5`) + + err = f.Write(ctx, "dir/subdir2/file6", strings.NewReader("content6"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/subdir2/file6", `content6`) + + entriesBeforeDelete, err := f.ReadDir(ctx, "dir") + require.NoError(t, err) + assert.Len(t, entriesBeforeDelete, 4) + + names := []string{} + for _, e := range entriesBeforeDelete { + names = append(names, e.Name()) + } + assert.Equal(t, names, []string{"file1", "file2", "subdir1", "subdir2"}) + + err = f.Delete(ctx, "dir") + assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) + + err = f.Delete(ctx, "dir", filer.DeleteRecursively) + assert.NoError(t, err) + _, err = f.ReadDir(ctx, "dir") + assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{}) +} + +func TestAccFilerRecursiveDelete(t *testing.T) { + t.Parallel() + + for _, testCase := range []struct { + name string + f func(t *testing.T) (filer.Filer, string) + }{ + {"local", setupLocalFiler}, + {"workspace files", setupWsfsFiler}, + {"dbfs", setupDbfsFiler}, + {"files", setupUcVolumesFiler}, + } { + tc := testCase + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + f, _ := tc.f(t) + ctx := context.Background() + + // Common tests we run across all filers to ensure consistent behavior. + commonFilerRecursiveDeleteTest(t, ctx, f) + }) + } +} + +// Common tests we run across all filers to ensure consistent behavior. +func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) { + var err error + + // Write should fail because the intermediate directory doesn't exist. err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`)) assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{})) assert.True(t, errors.Is(err, fs.ErrNotExist)) - // Read should fail because the root path doesn't yet exist. + // Read should fail because the intermediate directory doesn't yet exist. _, err = f.Read(ctx, "/foo/bar") assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) assert.True(t, errors.Is(err, fs.ErrNotExist)) @@ -96,12 +165,12 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { // Delete should fail if the file doesn't exist. err = f.Delete(ctx, "/doesnt_exist") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) + assert.ErrorAs(t, err, &filer.FileDoesNotExistError{}) assert.True(t, errors.Is(err, fs.ErrNotExist)) // Stat should fail if the file doesn't exist. 
_, err = f.Stat(ctx, "/doesnt_exist") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) + assert.ErrorAs(t, err, &filer.FileDoesNotExistError{}) assert.True(t, errors.Is(err, fs.ErrNotExist)) // Delete should succeed for file that does exist. @@ -110,7 +179,7 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { // Delete should fail for a non-empty directory. err = f.Delete(ctx, "/foo") - assert.True(t, errors.As(err, &filer.DirectoryNotEmptyError{})) + assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) assert.True(t, errors.Is(err, fs.ErrInvalid)) // Delete should succeed for a non-empty directory if the DeleteRecursively flag is set. @@ -124,7 +193,33 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.True(t, errors.Is(err, fs.ErrInvalid)) } -func runFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { +func TestAccFilerReadWrite(t *testing.T) { + t.Parallel() + + for _, testCase := range []struct { + name string + f func(t *testing.T) (filer.Filer, string) + }{ + {"local", setupLocalFiler}, + {"workspace files", setupWsfsFiler}, + {"dbfs", setupDbfsFiler}, + {"files", setupUcVolumesFiler}, + } { + tc := testCase + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + f, _ := tc.f(t) + ctx := context.Background() + + // Common tests we run across all filers to ensure consistent behavior. + commonFilerReadWriteTests(t, ctx, f) + }) + } +} + +// Common tests we run across all filers to ensure consistent behavior. +func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { var err error var info fs.FileInfo @@ -206,54 +301,28 @@ func runFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.False(t, entries[0].IsDir()) } -func setupWorkspaceFilesTest(t *testing.T) (context.Context, filer.Filer) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFilerReadDir(t *testing.T) { + t.Parallel() - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) - f, err := filer.NewWorkspaceFilesClient(w, tmpdir) - require.NoError(t, err) + for _, testCase := range []struct { + name string + f func(t *testing.T) (filer.Filer, string) + }{ + {"local", setupLocalFiler}, + {"workspace files", setupWsfsFiler}, + {"dbfs", setupDbfsFiler}, + {"files", setupUcVolumesFiler}, + } { + tc := testCase - // Check if we can use this API here, skip test if we cannot. 
- _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") - var aerr *apierr.APIError - if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { - t.Skip(aerr.Message) + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + f, _ := tc.f(t) + ctx := context.Background() + + commonFilerReadDirTest(t, ctx, f) + }) } - - return ctx, f -} - -func TestAccFilerWorkspaceFilesReadWrite(t *testing.T) { - ctx, f := setupWorkspaceFilesTest(t) - runFilerReadWriteTest(t, ctx, f) -} - -func TestAccFilerWorkspaceFilesReadDir(t *testing.T) { - ctx, f := setupWorkspaceFilesTest(t) - runFilerReadDirTest(t, ctx, f) -} - -func setupFilerDbfsTest(t *testing.T) (context.Context, filer.Filer) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryDbfsDir(t, w) - f, err := filer.NewDbfsClient(w, tmpdir) - require.NoError(t, err) - return ctx, f -} - -func TestAccFilerDbfsReadWrite(t *testing.T) { - ctx, f := setupFilerDbfsTest(t) - runFilerReadWriteTest(t, ctx, f) -} - -func TestAccFilerDbfsReadDir(t *testing.T) { - ctx, f := setupFilerDbfsTest(t) - runFilerReadDirTest(t, ctx, f) } var jupyterNotebookContent1 = ` @@ -305,7 +374,8 @@ var jupyterNotebookContent2 = ` ` func TestAccFilerWorkspaceNotebookConflict(t *testing.T) { - ctx, f := setupWorkspaceFilesTest(t) + f, _ := setupWsfsFiler(t) + ctx := context.Background() var err error // Upload the notebooks @@ -350,7 +420,8 @@ func TestAccFilerWorkspaceNotebookConflict(t *testing.T) { } func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) { - ctx, f := setupWorkspaceFilesTest(t) + f, _ := setupWsfsFiler(t) + ctx := context.Background() var err error // Upload notebooks @@ -391,140 +462,3 @@ func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) { filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"second upload\"))") filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 2\")") } - -func setupFilerLocalTest(t *testing.T) (context.Context, filer.Filer) { - ctx := context.Background() - f, err := filer.NewLocalClient(t.TempDir()) - require.NoError(t, err) - return ctx, f -} - -func TestAccFilerLocalReadWrite(t *testing.T) { - ctx, f := setupFilerLocalTest(t) - runFilerReadWriteTest(t, ctx, f) -} - -func TestAccFilerLocalReadDir(t *testing.T) { - ctx, f := setupFilerLocalTest(t) - runFilerReadDirTest(t, ctx, f) -} - -func temporaryVolumeDir(t *testing.T, w *databricks.WorkspaceClient) string { - // Assume this test is run against the internal testing workspace. - path := RandomName("/Volumes/bogdanghita/default/v3_shared/cli-testing/integration-test-filer-") - - // The Files API doesn't include support for creating and removing directories yet. - // Directories are created implicitly by writing a file to a path that doesn't exist. - // We therefore assume we can use the specified path without creating it first. 
- t.Logf("using dbfs:%s", path) - - return path -} - -func setupFilerFilesApiTest(t *testing.T) (context.Context, filer.Filer) { - t.SkipNow() // until available on prod - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := temporaryVolumeDir(t, w) - f, err := filer.NewFilesClient(w, tmpdir) - require.NoError(t, err) - return ctx, f -} - -func TestAccFilerFilesApiReadWrite(t *testing.T) { - ctx, f := setupFilerFilesApiTest(t) - - // The Files API doesn't know about directories yet. - // Below is a copy of [runFilerReadWriteTest] with - // assertions that don't work commented out. - - var err error - - // Write should fail because the root path doesn't yet exist. - // err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`)) - // assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{})) - // assert.True(t, errors.Is(err, fs.ErrNotExist)) - - // Read should fail because the root path doesn't yet exist. - _, err = f.Read(ctx, "/foo/bar") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) - assert.True(t, errors.Is(err, fs.ErrNotExist)) - - // Read should fail because the path points to a directory - // err = f.Mkdir(ctx, "/dir") - // require.NoError(t, err) - // _, err = f.Read(ctx, "/dir") - // assert.ErrorIs(t, err, fs.ErrInvalid) - - // Write with CreateParentDirectories flag should succeed. - err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`), filer.CreateParentDirectories) - assert.NoError(t, err) - filerTest{t, f}.assertContents(ctx, "/foo/bar", `hello world`) - - // Write should fail because there is an existing file at the specified path. - err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`)) - assert.True(t, errors.As(err, &filer.FileAlreadyExistsError{})) - assert.True(t, errors.Is(err, fs.ErrExist)) - - // Write with OverwriteIfExists should succeed. - err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`), filer.OverwriteIfExists) - assert.NoError(t, err) - filerTest{t, f}.assertContents(ctx, "/foo/bar", `hello universe`) - - // Write should succeed if there is no existing file at the specified path. - err = f.Write(ctx, "/foo/qux", strings.NewReader(`hello universe`)) - assert.NoError(t, err) - - // Stat on a directory should succeed. - // Note: size and modification time behave differently between backends. - info, err := f.Stat(ctx, "/foo") - require.NoError(t, err) - assert.Equal(t, "foo", info.Name()) - assert.True(t, info.Mode().IsDir()) - assert.Equal(t, true, info.IsDir()) - - // Stat on a file should succeed. - // Note: size and modification time behave differently between backends. - info, err = f.Stat(ctx, "/foo/bar") - require.NoError(t, err) - assert.Equal(t, "bar", info.Name()) - assert.True(t, info.Mode().IsRegular()) - assert.Equal(t, false, info.IsDir()) - - // Delete should fail if the file doesn't exist. - err = f.Delete(ctx, "/doesnt_exist") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) - assert.True(t, errors.Is(err, fs.ErrNotExist)) - - // Stat should fail if the file doesn't exist. - _, err = f.Stat(ctx, "/doesnt_exist") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) - assert.True(t, errors.Is(err, fs.ErrNotExist)) - - // Delete should succeed for file that does exist. - err = f.Delete(ctx, "/foo/bar") - assert.NoError(t, err) - - // Delete should fail for a non-empty directory. 
- err = f.Delete(ctx, "/foo") - assert.True(t, errors.As(err, &filer.DirectoryNotEmptyError{})) - assert.True(t, errors.Is(err, fs.ErrInvalid)) - - // Delete should succeed for a non-empty directory if the DeleteRecursively flag is set. - // err = f.Delete(ctx, "/foo", filer.DeleteRecursively) - // assert.NoError(t, err) - - // Delete of the filer root should ALWAYS fail, otherwise subsequent writes would fail. - // It is not in the filer's purview to delete its root directory. - err = f.Delete(ctx, "/") - assert.True(t, errors.As(err, &filer.CannotDeleteRootError{})) - assert.True(t, errors.Is(err, fs.ErrInvalid)) -} - -func TestAccFilerFilesApiReadDir(t *testing.T) { - t.Skipf("no support for ReadDir yet") - ctx, f := setupFilerFilesApiTest(t) - runFilerReadDirTest(t, ctx, f) -} diff --git a/internal/fs_cat_test.go b/internal/fs_cat_test.go index 2c979ea73..6292aef18 100644 --- a/internal/fs_cat_test.go +++ b/internal/fs_cat_test.go @@ -13,31 +13,60 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccFsCatForDbfs(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsCat(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) + err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) + require.NoError(t, err) - err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) - - stdout, stderr := RequireSuccessfulRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "a", "hello.txt")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "abc", stdout.String()) + stdout, stderr := RequireSuccessfulRun(t, "fs", "cat", path.Join(tmpDir, "hello.txt")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "abcd", stdout.String()) + }) + } } -func TestAccFsCatForDbfsOnNonExistentFile(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsCatOnADir(t *testing.T) { + t.Parallel() - _, _, err := RequireErrorRun(t, "fs", "cat", "dbfs:/non-existent-file") - assert.ErrorIs(t, err, fs.ErrNotExist) + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "dir1") + require.NoError(t, err) + + _, _, err = RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "dir1")) + assert.ErrorAs(t, err, &filer.NotAFile{}) + }) + } +} + +func TestAccFsCatOnNonExistentFile(t *testing.T) { + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, tmpDir := tc.setupFiler(t) + + _, _, err := RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "non-existent-file")) + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } func TestAccFsCatForDbfsInvalidScheme(t *testing.T) { @@ -65,6 +94,3 @@ func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { _, _, err = RequireErrorRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") assert.ErrorContains(t, err, "json output not supported") } - -// TODO: Add test asserting an error when cat is called on an directory. 
Need this to be -// fixed in the SDK first (https://github.com/databricks/databricks-sdk-go/issues/414) diff --git a/internal/fs_cp_test.go b/internal/fs_cp_test.go index ab177a36f..b69735bc0 100644 --- a/internal/fs_cp_test.go +++ b/internal/fs_cp_test.go @@ -2,16 +2,15 @@ package internal import ( "context" - "fmt" "io" "path" "path/filepath" + "regexp" "runtime" "strings" "testing" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -60,84 +59,124 @@ func assertTargetDir(t *testing.T, ctx context.Context, f filer.Filer) { assertFileContent(t, ctx, f, "a/b/c/hello.txt", "hello, world\n") } -func setupLocalFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - tmp := t.TempDir() - f, err := filer.NewLocalClient(tmp) - require.NoError(t, err) - - return f, path.Join(filepath.ToSlash(tmp)) -} - -func setupDbfsFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - - return f, path.Join("dbfs:/", tmpDir) -} - type cpTest struct { + name string setupSource func(*testing.T) (filer.Filer, string) setupTarget func(*testing.T) (filer.Filer, string) } -func setupTable() []cpTest { +func copyTests() []cpTest { return []cpTest{ - {setupSource: setupLocalFiler, setupTarget: setupLocalFiler}, - {setupSource: setupLocalFiler, setupTarget: setupDbfsFiler}, - {setupSource: setupDbfsFiler, setupTarget: setupLocalFiler}, - {setupSource: setupDbfsFiler, setupTarget: setupDbfsFiler}, + // source: local file system + { + name: "local to local", + setupSource: setupLocalFiler, + setupTarget: setupLocalFiler, + }, + { + name: "local to dbfs", + setupSource: setupLocalFiler, + setupTarget: setupDbfsFiler, + }, + { + name: "local to uc-volumes", + setupSource: setupLocalFiler, + setupTarget: setupUcVolumesFiler, + }, + + // source: dbfs + { + name: "dbfs to local", + setupSource: setupDbfsFiler, + setupTarget: setupLocalFiler, + }, + { + name: "dbfs to dbfs", + setupSource: setupDbfsFiler, + setupTarget: setupDbfsFiler, + }, + { + name: "dbfs to uc-volumes", + setupSource: setupDbfsFiler, + setupTarget: setupUcVolumesFiler, + }, + + // source: uc-volumes + { + name: "uc-volumes to local", + setupSource: setupUcVolumesFiler, + setupTarget: setupLocalFiler, + }, + { + name: "uc-volumes to dbfs", + setupSource: setupUcVolumesFiler, + setupTarget: setupDbfsFiler, + }, + { + name: "uc-volumes to uc-volumes", + setupSource: setupUcVolumesFiler, + setupTarget: setupUcVolumesFiler, + }, } } func TestAccFsCpDir(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - RequireSuccessfulRun(t, "fs", "cp", "-r", sourceDir, targetDir) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - assertTargetDir(t, ctx, targetFiler) + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + + assertTargetDir(t, context.Background(), targetFiler) + }) } } func 
TestAccFsCpFileToFile(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceFile(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - assertTargetFile(t, ctx, targetFiler, "bar.txt") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceFile(t, context.Background(), sourceFiler) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) + + assertTargetFile(t, context.Background(), targetFiler, "bar.txt") + }) } } func TestAccFsCpFileToDir(t *testing.T) { - ctx := context.Background() - table := setupTable() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceFile(t, ctx, sourceFiler) + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) + for _, testCase := range copyTests() { + tc := testCase - assertTargetFile(t, ctx, targetFiler, "foo.txt") + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceFile(t, context.Background(), sourceFiler) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) + + assertTargetFile(t, context.Background(), targetFiler, "foo.txt") + }) } } @@ -158,125 +197,161 @@ func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { } func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") - assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "this should not be overwritten") - assertFileContent(t, ctx, targetFiler, "query.sql", "SELECT 1") - assertFileContent(t, ctx, targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") + assertFileContent(t, context.Background(), targetFiler, "query.sql", "SELECT 1") + assertFileContent(t, context.Background(), targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)") + }) } } func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { - ctx := 
context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) - assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "this should not be overwritten") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) + assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") + }) } } func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hola.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/hola.txt"), "--recursive") - assertFileContent(t, ctx, targetFiler, "a/b/c/hola.txt", "this should not be overwritten") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/dontoverwrite.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) + assertFileContent(t, context.Background(), targetFiler, "a/b/c/dontoverwrite.txt", "this should not be overwritten") + }) } } func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this will be overwritten"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") - assertTargetDir(t, ctx, targetFiler) + sourceFiler, sourceDir := 
tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") + assertTargetDir(t, context.Background(), targetFiler) + }) } } func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hola.txt", strings.NewReader("this will be overwritten. Such is life."), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/hola.txt"), "--overwrite") - assertFileContent(t, ctx, targetFiler, "a/b/c/hola.txt", "hello, world\n") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/overwritten.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") + assertFileContent(t, context.Background(), targetFiler, "a/b/c/overwritten.txt", "hello, world\n") + }) } } func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this will be overwritten :') "), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--recursive", "--overwrite") - assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "hello, world\n") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") + assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "hello, world\n") + }) } } func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Parallel() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + 
for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - _, _, err = RequireErrorRun(t, "fs", "cp", "dbfs:"+tmpDir, "dbfs:/tmp") - assert.Equal(t, fmt.Sprintf("source path %s is a directory. Please specify the --recursive flag", tmpDir), err.Error()) + _, tmpDir := tc.setupFiler(t) + + _, _, err := RequireErrorRun(t, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) + r := regexp.MustCompile("source path .* is a directory. Please specify the --recursive flag") + assert.Regexp(t, r, err.Error()) + }) + } } func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { @@ -287,20 +362,24 @@ func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { } func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - _, _, err = RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive", "--overwrite") - assert.Error(t, err) + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories) + require.NoError(t, err) + + _, _, err = RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") + assert.Error(t, err) + }) } - } diff --git a/internal/fs_ls_test.go b/internal/fs_ls_test.go index 9e02b09cc..994a4a425 100644 --- a/internal/fs_ls_test.go +++ b/internal/fs_ls_test.go @@ -11,131 +11,163 @@ import ( _ "github.com/databricks/cli/cmd/fs" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsLsForDbfs(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - - err = f.Mkdir(ctx, "a") - require.NoError(t, err) - err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) - err = f.Write(ctx, "bye.txt", strings.NewReader("def")) - require.NoError(t, err) - - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json") - assert.Equal(t, "", stderr.String()) - var parsedStdout []map[string]any - err = json.Unmarshal(stdout.Bytes(), &parsedStdout) - require.NoError(t, err) - - // assert on ls output - assert.Len(t, parsedStdout, 2) - assert.Equal(t, "a", parsedStdout[0]["name"]) - assert.Equal(t, true, parsedStdout[0]["is_directory"]) - assert.Equal(t, float64(0), parsedStdout[0]["size"]) - assert.Equal(t, "bye.txt", parsedStdout[1]["name"]) - assert.Equal(t, false, parsedStdout[1]["is_directory"]) - assert.Equal(t, float64(3), 
parsedStdout[1]["size"]) +type fsTest struct { + name string + setupFiler func(t *testing.T) (filer.Filer, string) } -func TestAccFsLsForDbfsWithAbsolutePaths(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - - err = f.Mkdir(ctx, "a") - require.NoError(t, err) - err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) - err = f.Write(ctx, "bye.txt", strings.NewReader("def")) - require.NoError(t, err) - - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json", "--absolute") - assert.Equal(t, "", stderr.String()) - var parsedStdout []map[string]any - err = json.Unmarshal(stdout.Bytes(), &parsedStdout) - require.NoError(t, err) - - // assert on ls output - assert.Len(t, parsedStdout, 2) - assert.Equal(t, path.Join("dbfs:", tmpDir, "a"), parsedStdout[0]["name"]) - assert.Equal(t, true, parsedStdout[0]["is_directory"]) - assert.Equal(t, float64(0), parsedStdout[0]["size"]) - - assert.Equal(t, path.Join("dbfs:", tmpDir, "bye.txt"), parsedStdout[1]["name"]) - assert.Equal(t, false, parsedStdout[1]["is_directory"]) - assert.Equal(t, float64(3), parsedStdout[1]["size"]) +var fsTests = []fsTest{ + { + name: "dbfs", + setupFiler: setupDbfsFiler, + }, + { + name: "uc-volumes", + setupFiler: setupUcVolumesFiler, + }, } -func TestAccFsLsForDbfsOnFile(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() +func setupLsFiles(t *testing.T, f filer.Filer) { + err := f.Write(context.Background(), "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - - f, err := filer.NewDbfsClient(w, tmpDir) + err = f.Write(context.Background(), "bye.txt", strings.NewReader("def")) require.NoError(t, err) - - err = f.Mkdir(ctx, "a") - require.NoError(t, err) - err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) - - _, _, err = RequireErrorRun(t, "fs", "ls", "dbfs:"+path.Join(tmpDir, "a", "hello.txt"), "--output=json") - assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error()) } -func TestAccFsLsForDbfsOnEmptyDir(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsLs(t *testing.T) { + t.Parallel() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json") - assert.Equal(t, "", stderr.String()) - var parsedStdout []map[string]any - err = json.Unmarshal(stdout.Bytes(), &parsedStdout) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) + setupLsFiles(t, f) - // assert on ls output - assert.Equal(t, 0, len(parsedStdout)) + stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + assert.Equal(t, "", stderr.String()) + + var parsedStdout []map[string]any + err := json.Unmarshal(stdout.Bytes(), &parsedStdout) + require.NoError(t, err) + + // assert on ls output + assert.Len(t, parsedStdout, 2) + + assert.Equal(t, "a", parsedStdout[0]["name"]) + assert.Equal(t, true, 
parsedStdout[0]["is_directory"]) + assert.Equal(t, float64(0), parsedStdout[0]["size"]) + + assert.Equal(t, "bye.txt", parsedStdout[1]["name"]) + assert.Equal(t, false, parsedStdout[1]["is_directory"]) + assert.Equal(t, float64(3), parsedStdout[1]["size"]) + }) + } } -func TestAccFsLsForDbfsForNonexistingDir(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsLsWithAbsolutePaths(t *testing.T) { + t.Parallel() - _, _, err := RequireErrorRun(t, "fs", "ls", "dbfs:/john-cena", "--output=json") - assert.ErrorIs(t, err, fs.ErrNotExist) + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + f, tmpDir := tc.setupFiler(t) + setupLsFiles(t, f) + + stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json", "--absolute") + assert.Equal(t, "", stderr.String()) + + var parsedStdout []map[string]any + err := json.Unmarshal(stdout.Bytes(), &parsedStdout) + require.NoError(t, err) + + // assert on ls output + assert.Len(t, parsedStdout, 2) + + assert.Equal(t, path.Join(tmpDir, "a"), parsedStdout[0]["name"]) + assert.Equal(t, true, parsedStdout[0]["is_directory"]) + assert.Equal(t, float64(0), parsedStdout[0]["size"]) + + assert.Equal(t, path.Join(tmpDir, "bye.txt"), parsedStdout[1]["name"]) + assert.Equal(t, false, parsedStdout[1]["is_directory"]) + assert.Equal(t, float64(3), parsedStdout[1]["size"]) + }) + } +} + +func TestAccFsLsOnFile(t *testing.T) { + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + f, tmpDir := tc.setupFiler(t) + setupLsFiles(t, f) + + _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") + assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error()) + assert.ErrorAs(t, err, &filer.NotADirectory{}) + }) + } +} + +func TestAccFsLsOnEmptyDir(t *testing.T) { + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, tmpDir := tc.setupFiler(t) + + stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + assert.Equal(t, "", stderr.String()) + var parsedStdout []map[string]any + err := json.Unmarshal(stdout.Bytes(), &parsedStdout) + require.NoError(t, err) + + // assert on ls output + assert.Equal(t, 0, len(parsedStdout)) + }) + } +} + +func TestAccFsLsForNonexistingDir(t *testing.T) { + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, tmpDir := tc.setupFiler(t) + + _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") + assert.ErrorIs(t, err, fs.ErrNotExist) + assert.Regexp(t, regexp.MustCompile("no such directory: .*/nonexistent"), err.Error()) + }) + } } func TestAccFsLsWithoutScheme(t *testing.T) { + t.Parallel() + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := RequireErrorRun(t, "fs", "ls", "/ray-mysterio", "--output=json") + _, _, err := RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") assert.ErrorIs(t, err, fs.ErrNotExist) } diff --git a/internal/fs_mkdir_test.go b/internal/fs_mkdir_test.go index af0e9d187..dd75c7c32 100644 --- a/internal/fs_mkdir_test.go +++ b/internal/fs_mkdir_test.go @@ -8,110 +8,127 @@ import ( "testing" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func 
TestAccFsMkdirCreatesDirectory(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsMkdir(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) - // create directory "a" - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "a")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) + // create directory "a" + stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) - // assert directory "a" is created - info, err := f.Stat(ctx, "a") - require.NoError(t, err) - assert.Equal(t, "a", info.Name()) - assert.Equal(t, true, info.IsDir()) + // assert directory "a" is created + info, err := f.Stat(context.Background(), "a") + require.NoError(t, err) + assert.Equal(t, "a", info.Name()) + assert.Equal(t, true, info.IsDir()) + }) + } } -func TestAccFsMkdirCreatesMultipleDirectories(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) - // create directory /a/b/c - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "a", "b", "c")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) + // create directory "a/b/c" + stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) - // assert directory "a" is created - infoA, err := f.Stat(ctx, "a") - require.NoError(t, err) - assert.Equal(t, "a", infoA.Name()) - assert.Equal(t, true, infoA.IsDir()) + // assert directory "a" is created + infoA, err := f.Stat(context.Background(), "a") + require.NoError(t, err) + assert.Equal(t, "a", infoA.Name()) + assert.Equal(t, true, infoA.IsDir()) - // assert directory "b" is created - infoB, err := f.Stat(ctx, "a/b") - require.NoError(t, err) - assert.Equal(t, "b", infoB.Name()) - assert.Equal(t, true, infoB.IsDir()) + // assert directory "b" is created + infoB, err := f.Stat(context.Background(), "a/b") + require.NoError(t, err) + assert.Equal(t, "b", infoB.Name()) + assert.Equal(t, true, infoB.IsDir()) - // assert directory "c" is created - infoC, err := f.Stat(ctx, "a/b/c") - require.NoError(t, err) - assert.Equal(t, "c", infoC.Name()) - assert.Equal(t, true, infoC.IsDir()) + // assert directory "c" is created + infoC, err := f.Stat(context.Background(), "a/b/c") + require.NoError(t, err) + assert.Equal(t, "c", infoC.Name()) + assert.Equal(t, true, infoC.IsDir()) + }) + } } func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - 
tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - // create directory "a" - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - err = f.Mkdir(ctx, "a") - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) - // assert run is successful without any errors - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "a")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) + // create directory "a" + err := f.Mkdir(context.Background(), "a") + require.NoError(t, err) + + // assert run is successful without any errors + stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) + }) + } } func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + t.Run("dbfs", func(t *testing.T) { + t.Parallel() - tmpDir := TemporaryDbfsDir(t, w) + f, tmpDir := setupDbfsFiler(t) - // create file hello - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - err = f.Write(ctx, "hello", strings.NewReader("abc")) - require.NoError(t, err) + // create file "hello" + err := f.Write(context.Background(), "hello", strings.NewReader("abc")) + require.NoError(t, err) - // assert run fails - _, _, err = RequireErrorRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "hello")) - // Different cloud providers return different errors. - regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`) - assert.Regexp(t, regex, err.Error()) + // assert mkdir fails + _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + + // Different cloud providers return different errors. 
+ regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`) + assert.Regexp(t, regex, err.Error()) + }) + + t.Run("uc-volumes", func(t *testing.T) { + t.Parallel() + + f, tmpDir := setupUcVolumesFiler(t) + + // create file "hello" + err := f.Write(context.Background(), "hello", strings.NewReader("abc")) + require.NoError(t, err) + + // assert mkdir fails + _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + + assert.ErrorAs(t, err, &filer.FileAlreadyExistsError{}) + }) } diff --git a/internal/fs_rm_test.go b/internal/fs_rm_test.go index d70827d1a..e86f5713b 100644 --- a/internal/fs_rm_test.go +++ b/internal/fs_rm_test.go @@ -8,139 +8,150 @@ import ( "testing" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsRmForFile(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsRmFile(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + // Create a file + f, tmpDir := tc.setupFiler(t) + err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) + require.NoError(t, err) - // create file to delete - err = f.Write(ctx, "hello.txt", strings.NewReader("abc")) - require.NoError(t, err) + // Check file was created + _, err = f.Stat(context.Background(), "hello.txt") + assert.NoError(t, err) - // check file was created - info, err := f.Stat(ctx, "hello.txt") - require.NoError(t, err) - require.Equal(t, "hello.txt", info.Name()) - require.Equal(t, info.IsDir(), false) + // Run rm command + stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "hello.txt")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) - // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", "dbfs:"+path.Join(tmpDir, "hello.txt")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) - - // assert file was deleted - _, err = f.Stat(ctx, "hello.txt") - assert.ErrorIs(t, err, fs.ErrNotExist) + // Assert file was deleted + _, err = f.Stat(context.Background(), "hello.txt") + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } -func TestAccFsRmForEmptyDirectory(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsRmEmptyDir(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + // Create a directory + f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "a") + require.NoError(t, err) - // create directory to delete - err = f.Mkdir(ctx, "avacado") - require.NoError(t, err) + // Check directory was created + _, err = f.Stat(context.Background(), "a") + assert.NoError(t, err) - // check directory was created - info, err := f.Stat(ctx, "avacado") - require.NoError(t, err) - require.Equal(t, "avacado", info.Name()) 
- require.Equal(t, info.IsDir(), true) + // Run rm command + stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) - // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", "dbfs:"+path.Join(tmpDir, "avacado")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) - - // assert directory was deleted - _, err = f.Stat(ctx, "avacado") - assert.ErrorIs(t, err, fs.ErrNotExist) + // Assert directory was deleted + _, err = f.Stat(context.Background(), "a") + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } -func TestAccFsRmForNonEmptyDirectory(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsRmNonEmptyDirectory(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + // Create a directory + f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "a") + require.NoError(t, err) - // create file in dir - err = f.Write(ctx, "avacado/guacamole", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) + // Create a file in the directory + err = f.Write(context.Background(), "a/hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) + require.NoError(t, err) - // check file was created - info, err := f.Stat(ctx, "avacado/guacamole") - require.NoError(t, err) - require.Equal(t, "guacamole", info.Name()) - require.Equal(t, info.IsDir(), false) + // Check file was created + _, err = f.Stat(context.Background(), "a/hello.txt") + assert.NoError(t, err) - // Run rm command - _, _, err = RequireErrorRun(t, "fs", "rm", "dbfs:"+path.Join(tmpDir, "avacado")) - assert.ErrorIs(t, err, fs.ErrInvalid) - assert.ErrorContains(t, err, "directory not empty") + // Run rm command + _, _, err = RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "a")) + assert.ErrorIs(t, err, fs.ErrInvalid) + assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) + }) + } } func TestAccFsRmForNonExistentFile(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, tmpDir := tc.setupFiler(t) + + // Expect error if file does not exist + _, _, err := RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "does-not-exist")) + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } - // Expect error if file does not exist - _, _, err := RequireErrorRun(t, "fs", "rm", "dbfs:/does-not-exist") - assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccFsRmForNonEmptyDirectoryWithRecursiveFlag(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsRmDirRecursively(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) - // create file in dir - err = f.Write(ctx, "avacado/guacamole", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) + // Create a directory + err := 
f.Mkdir(context.Background(), "a") + require.NoError(t, err) - // check file was created - info, err := f.Stat(ctx, "avacado/guacamole") - require.NoError(t, err) - require.Equal(t, "guacamole", info.Name()) - require.Equal(t, info.IsDir(), false) + // Create a file in the directory + err = f.Write(context.Background(), "a/hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) + require.NoError(t, err) - // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", "dbfs:"+path.Join(tmpDir, "avacado"), "--recursive") - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) + // Check file was created + _, err = f.Stat(context.Background(), "a/hello.txt") + assert.NoError(t, err) - // assert directory was deleted - _, err = f.Stat(ctx, "avacado") - assert.ErrorIs(t, err, fs.ErrNotExist) + // Run rm command + stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) + + // Assert directory was deleted + _, err = f.Stat(context.Background(), "a") + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } diff --git a/internal/helpers.go b/internal/helpers.go index 6377ae07e..ca5aa25e4 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -5,10 +5,13 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "math/rand" + "net/http" "os" + "path" "path/filepath" "reflect" "strings" @@ -19,8 +22,10 @@ import ( "github.com/databricks/cli/cmd" _ "github.com/databricks/cli/cmd/version" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/files" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -452,6 +457,40 @@ func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { return path } +// Create a new UC volume in a catalog called "main" in the workspace. 
+func temporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { + ctx := context.Background() + + // Create a schema + schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ + CatalogName: "main", + Name: RandomName("test-schema-"), + }) + require.NoError(t, err) + t.Cleanup(func() { + w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ + FullName: schema.FullName, + }) + }) + + // Create a volume + volume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{ + CatalogName: "main", + SchemaName: schema.Name, + Name: "my-volume", + VolumeType: catalog.VolumeTypeManaged, + }) + require.NoError(t, err) + t.Cleanup(func() { + w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ + Name: volume.FullName, + }) + }) + + return path.Join("/Volumes", "main", schema.Name, volume.Name) + +} + func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { ctx := context.Background() me, err := w.CurrentUser.Me(ctx) @@ -489,3 +528,62 @@ func GetNodeTypeId(env string) string { } return "Standard_DS4_v2" } + +func setupLocalFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + tmp := t.TempDir() + f, err := filer.NewLocalClient(tmp) + require.NoError(t, err) + + return f, path.Join(filepath.ToSlash(tmp)) +} + +func setupWsfsFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + tmpdir := TemporaryWorkspaceDir(t, w) + f, err := filer.NewWorkspaceFilesClient(w, tmpdir) + require.NoError(t, err) + + // Check if we can use this API here, skip test if we cannot. + _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { + t.Skip(aerr.Message) + } + + return f, tmpdir +} + +func setupDbfsFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + tmpDir := TemporaryDbfsDir(t, w) + f, err := filer.NewDbfsClient(w, tmpDir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpDir) +} + +func setupUcVolumesFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + tmpDir := temporaryUcVolume(t, w) + f, err := filer.NewFilesClient(w, tmpDir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpDir) +} diff --git a/libs/filer/files_client.go b/libs/filer/files_client.go index 17884d573..9fc68bd56 100644 --- a/libs/filer/files_client.go +++ b/libs/filer/files_client.go @@ -11,18 +11,30 @@ import ( "net/url" "path" "slices" + "sort" "strings" "time" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/service/files" + "golang.org/x/sync/errgroup" ) +// As of 19th Feb 2024, the Files API backend has a rate limit of 10 concurrent +// requests and 100 QPS. We limit the number of concurrent requests to 5 to +// avoid hitting the rate limit. +const maxFilesRequestsInFlight = 5 + // Type that implements fs.FileInfo for the Files API. +// This is required for the filer.Stat() method. 
type filesApiFileInfo struct { - absPath string - isDir bool + absPath string + isDir bool + fileSize int64 + lastModified int64 } func (info filesApiFileInfo) Name() string { @@ -30,8 +42,7 @@ func (info filesApiFileInfo) Name() string { } func (info filesApiFileInfo) Size() int64 { - // No way to get the file size in the Files API. - return 0 + return info.fileSize } func (info filesApiFileInfo) Mode() fs.FileMode { @@ -43,7 +54,7 @@ func (info filesApiFileInfo) Mode() fs.FileMode { } func (info filesApiFileInfo) ModTime() time.Time { - return time.Time{} + return time.UnixMilli(info.lastModified) } func (info filesApiFileInfo) IsDir() bool { @@ -54,6 +65,28 @@ func (info filesApiFileInfo) Sys() any { return nil } +// Type that implements fs.DirEntry for the Files API. +// This is required for the filer.ReadDir() method. +type filesApiDirEntry struct { + i filesApiFileInfo +} + +func (e filesApiDirEntry) Name() string { + return e.i.Name() +} + +func (e filesApiDirEntry) IsDir() bool { + return e.i.IsDir() +} + +func (e filesApiDirEntry) Type() fs.FileMode { + return e.i.Mode() +} + +func (e filesApiDirEntry) Info() (fs.FileInfo, error) { + return e.i, nil +} + // FilesClient implements the [Filer] interface for the Files API backend. type FilesClient struct { workspaceClient *databricks.WorkspaceClient @@ -63,10 +96,6 @@ type FilesClient struct { root WorkspaceRootPath } -func filesNotImplementedError(fn string) error { - return fmt.Errorf("filer.%s is not implemented for the Files API", fn) -} - func NewFilesClient(w *databricks.WorkspaceClient, root string) (Filer, error) { apiClient, err := client.New(w.Config) if err != nil { @@ -102,6 +131,24 @@ func (w *FilesClient) Write(ctx context.Context, name string, reader io.Reader, return err } + // Check that target path exists if CreateParentDirectories mode is not set + if !slices.Contains(mode, CreateParentDirectories) { + err := w.workspaceClient.Files.GetDirectoryMetadataByDirectoryPath(ctx, path.Dir(absPath)) + if err != nil { + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + return err + } + + // This API returns a 404 if the file doesn't exist. + if aerr.StatusCode == http.StatusNotFound { + return NoSuchDirectoryError{path.Dir(absPath)} + } + + return err + } + } + overwrite := slices.Contains(mode, OverwriteIfExists) urlPath = fmt.Sprintf("%s?overwrite=%t", urlPath, overwrite) headers := map[string]string{"Content-Type": "application/octet-stream"} @@ -119,7 +166,7 @@ func (w *FilesClient) Write(ctx context.Context, name string, reader io.Reader, } // This API returns 409 if the file already exists, when the object type is file - if aerr.StatusCode == http.StatusConflict { + if aerr.StatusCode == http.StatusConflict && aerr.ErrorCode == "ALREADY_EXISTS" { return FileAlreadyExistsError{absPath} } @@ -148,14 +195,20 @@ func (w *FilesClient) Read(ctx context.Context, name string) (io.ReadCloser, err // This API returns a 404 if the specified path does not exist. if aerr.StatusCode == http.StatusNotFound { + // Check if the path is a directory. If so, return not a file error. + if _, err := w.statDir(ctx, name); err == nil { + return nil, NotAFile{absPath} + } + + // No file or directory exists at the specified path. Return no such file error. 
return nil, FileDoesNotExistError{absPath} } return nil, err } -func (w *FilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { - absPath, urlPath, err := w.urlPath(name) +func (w *FilesClient) deleteFile(ctx context.Context, name string) error { + absPath, err := w.root.Join(name) if err != nil { return err } @@ -165,53 +218,232 @@ func (w *FilesClient) Delete(ctx context.Context, name string, mode ...DeleteMod return CannotDeleteRootError{} } - err = w.apiClient.Do(ctx, http.MethodDelete, urlPath, nil, nil, nil) + err = w.workspaceClient.Files.DeleteByFilePath(ctx, absPath) // Return early on success. if err == nil { return nil } - // Special handling of this error only if it is an API error. var aerr *apierr.APIError + // Special handling of this error only if it is an API error. if !errors.As(err, &aerr) { return err } - // This API returns a 404 if the specified path does not exist. + // This files delete API returns a 404 if the specified path does not exist. if aerr.StatusCode == http.StatusNotFound { return FileDoesNotExistError{absPath} } - // This API returns 409 if the underlying path is a directory. - if aerr.StatusCode == http.StatusConflict { + return err +} + +func (w *FilesClient) deleteDirectory(ctx context.Context, name string) error { + absPath, err := w.root.Join(name) + if err != nil { + return err + } + + // Illegal to delete the root path. + if absPath == w.root.rootPath { + return CannotDeleteRootError{} + } + + err = w.workspaceClient.Files.DeleteDirectoryByDirectoryPath(ctx, absPath) + + var aerr *apierr.APIError + // Special handling of this error only if it is an API error. + if !errors.As(err, &aerr) { + return err + } + + // The directory delete API returns a 400 if the directory is not empty + if aerr.StatusCode == http.StatusBadRequest { + reasons := []string{} + for _, detail := range aerr.Details { + reasons = append(reasons, detail.Reason) + } + // Error code 400 is generic and can be returned for other reasons. Make + // sure one of the reasons for the error is that the directory is not empty. + if !slices.Contains(reasons, "FILES_API_DIRECTORY_IS_NOT_EMPTY") { + return err + } return DirectoryNotEmptyError{absPath} } return err } -func (w *FilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { - return nil, filesNotImplementedError("ReadDir") -} +func (w *FilesClient) recursiveDelete(ctx context.Context, name string) error { + filerFS := NewFS(ctx, w) + dirsToDelete := make([]string, 0) + filesToDelete := make([]string, 0) + callback := func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } -func (w *FilesClient) Mkdir(ctx context.Context, name string) error { - // Directories are created implicitly. - // No need to do anything. + // Files API does not allowing deleting non-empty directories. We instead + // collect the directories to delete and delete them once all the files have + // been deleted. + if d.IsDir() { + dirsToDelete = append(dirsToDelete, path) + return nil + } + + filesToDelete = append(filesToDelete, path) + return nil + } + + // Walk the directory and accumulate the files and directories to delete. + err := fs.WalkDir(filerFS, name, callback) + if err != nil { + return err + } + + // Delete the files in parallel. + group, groupCtx := errgroup.WithContext(ctx) + group.SetLimit(maxFilesRequestsInFlight) + + for _, file := range filesToDelete { + file := file + + // Skip the file if the context has already been cancelled. 
+ select { + case <-groupCtx.Done(): + continue + default: + // Proceed. + } + + group.Go(func() error { + return w.deleteFile(groupCtx, file) + }) + } + + // Wait for the files to be deleted and return the first non-nil error. + err = group.Wait() + if err != nil { + return err + } + + // Delete the directories in reverse order to ensure that the parent + // directories are deleted after the children. This is possible because + // fs.WalkDir walks the directories in lexicographical order. + for i := len(dirsToDelete) - 1; i >= 0; i-- { + err := w.deleteDirectory(ctx, dirsToDelete[i]) + if err != nil { + return err + } + } return nil } -func (w *FilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { - absPath, urlPath, err := w.urlPath(name) +func (w *FilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { + if slices.Contains(mode, DeleteRecursively) { + return w.recursiveDelete(ctx, name) + } + + // Issue a stat call to determine if the path is a file or directory. + info, err := w.Stat(ctx, name) + if err != nil { + return err + } + + // Issue the delete call for a directory + if info.IsDir() { + return w.deleteDirectory(ctx, name) + } + + return w.deleteFile(ctx, name) +} + +func (w *FilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { + absPath, err := w.root.Join(name) if err != nil { return nil, err } - err = w.apiClient.Do(ctx, http.MethodHead, urlPath, nil, nil, nil) + iter := w.workspaceClient.Files.ListDirectoryContents(ctx, files.ListDirectoryContentsRequest{ + DirectoryPath: absPath, + }) + + files, err := listing.ToSlice(ctx, iter) + + // Return early on success. + if err == nil { + entries := make([]fs.DirEntry, len(files)) + for i, file := range files { + entries[i] = filesApiDirEntry{ + i: filesApiFileInfo{ + absPath: file.Path, + isDir: file.IsDirectory, + fileSize: file.FileSize, + lastModified: file.LastModified, + }, + } + } + + // Sort by name for parity with os.ReadDir. + sort.Slice(entries, func(i, j int) bool { return entries[i].Name() < entries[j].Name() }) + return entries, nil + } + + // Special handling of this error only if it is an API error. + var apierr *apierr.APIError + if !errors.As(err, &apierr) { + return nil, err + } + + // This API returns a 404 if the specified path does not exist. + if apierr.StatusCode == http.StatusNotFound { + // Check if the path is a file. If so, return not a directory error. + if _, err := w.statFile(ctx, name); err == nil { + return nil, NotADirectory{absPath} + } + + // No file or directory exists at the specified path. Return no such directory error. + return nil, NoSuchDirectoryError{absPath} + } + return nil, err +} + +func (w *FilesClient) Mkdir(ctx context.Context, name string) error { + absPath, err := w.root.Join(name) + if err != nil { + return err + } + + err = w.workspaceClient.Files.CreateDirectory(ctx, files.CreateDirectoryRequest{ + DirectoryPath: absPath, + }) + + // Special handling of this error only if it is an API error. + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.StatusCode == http.StatusConflict { + return FileAlreadyExistsError{absPath} + } + + return err +} + +// Get file metadata for a file using the Files API. +func (w *FilesClient) statFile(ctx context.Context, name string) (fs.FileInfo, error) { + absPath, err := w.root.Join(name) + if err != nil { + return nil, err + } + + fileInfo, err := w.workspaceClient.Files.GetMetadataByFilePath(ctx, absPath) // If the HEAD requests succeeds, the file exists. 
if err == nil { - return filesApiFileInfo{absPath: absPath, isDir: false}, nil + return filesApiFileInfo{ + absPath: absPath, + isDir: false, + fileSize: fileInfo.ContentLength, + }, nil } // Special handling of this error only if it is an API error. @@ -225,10 +457,51 @@ func (w *FilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error return nil, FileDoesNotExistError{absPath} } - // This API returns 409 if the underlying path is a directory. - if aerr.StatusCode == http.StatusConflict { + return nil, err +} + +// Get file metadata for a directory using the Files API. +func (w *FilesClient) statDir(ctx context.Context, name string) (fs.FileInfo, error) { + absPath, err := w.root.Join(name) + if err != nil { + return nil, err + } + + err = w.workspaceClient.Files.GetDirectoryMetadataByDirectoryPath(ctx, absPath) + + // If the HEAD requests succeeds, the directory exists. + if err == nil { return filesApiFileInfo{absPath: absPath, isDir: true}, nil } + // Special handling of this error only if it is an API error. + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + return nil, err + } + + // The directory metadata API returns a 404 if the specified path does not exist. + if aerr.StatusCode == http.StatusNotFound { + return nil, NoSuchDirectoryError{absPath} + } + return nil, err } + +func (w *FilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + // Assume that the path is a directory and issue a stat call. + dirInfo, err := w.statDir(ctx, name) + + // If the file exists, return early. + if err == nil { + return dirInfo, nil + } + + // Return early if the error is not a NoSuchDirectoryError. + if !errors.As(err, &NoSuchDirectoryError{}) { + return nil, err + } + + // Since the path is not a directory, assume that it is a file and issue a stat call. + return w.statFile(ctx, name) +} From 26833418c37660d3df0d712b747c6e6136633e13 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 20 Feb 2024 21:53:59 +0100 Subject: [PATCH 047/286] Release v0.214.0 (#1226) CLI: * Add support for UC Volumes to the `databricks fs` commands ([#1209](https://github.com/databricks/cli/pull/1209)). Bundles: * Use dynamic configuration model in bundles ([#1098](https://github.com/databricks/cli/pull/1098)). * Allow use of variables references in primitive non-string fields ([#1219](https://github.com/databricks/cli/pull/1219)). * Add an experimental default-sql template ([#1051](https://github.com/databricks/cli/pull/1051)). * Add an experimental dbt-sql template ([#1059](https://github.com/databricks/cli/pull/1059)). Internal: * Add fork-user to winget release workflow ([#1214](https://github.com/databricks/cli/pull/1214)). * Use `any` as type for data sources and resources in `tf/schema` ([#1216](https://github.com/databricks/cli/pull/1216)). * Avoid infinite recursion when normalizing a recursive type ([#1213](https://github.com/databricks/cli/pull/1213)). * Fix issue where interpolating a new ref would rewrite unrelated fields ([#1217](https://github.com/databricks/cli/pull/1217)). * Use `dyn.Value` as input to generating Terraform JSON ([#1218](https://github.com/databricks/cli/pull/1218)). API Changes: * Changed `databricks lakehouse-monitors update` command with new required argument order. * Added `databricks online-tables` command group. OpenAPI commit cdd76a98a4fca7008572b3a94427566dd286c63b (2024-02-19) Dependency updates: * Bump Terraform provider to v1.36.2 ([#1215](https://github.com/databricks/cli/pull/1215)). 
* Bump github.com/databricks/databricks-sdk-go from 0.32.0 to 0.33.0 ([#1222](https://github.com/databricks/cli/pull/1222)). --- CHANGELOG.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f68ceabca..88dbc71bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Version changelog +## 0.214.0 + +CLI: + * Add support for UC Volumes to the `databricks fs` commands ([#1209](https://github.com/databricks/cli/pull/1209)). + +Bundles: + * Use dynamic configuration model in bundles ([#1098](https://github.com/databricks/cli/pull/1098)). + * Allow use of variables references in primitive non-string fields ([#1219](https://github.com/databricks/cli/pull/1219)). + * Add an experimental default-sql template ([#1051](https://github.com/databricks/cli/pull/1051)). + * Add an experimental dbt-sql template ([#1059](https://github.com/databricks/cli/pull/1059)). + +Internal: + * Add fork-user to winget release workflow ([#1214](https://github.com/databricks/cli/pull/1214)). + * Use `any` as type for data sources and resources in `tf/schema` ([#1216](https://github.com/databricks/cli/pull/1216)). + * Avoid infinite recursion when normalizing a recursive type ([#1213](https://github.com/databricks/cli/pull/1213)). + * Fix issue where interpolating a new ref would rewrite unrelated fields ([#1217](https://github.com/databricks/cli/pull/1217)). + * Use `dyn.Value` as input to generating Terraform JSON ([#1218](https://github.com/databricks/cli/pull/1218)). + +API Changes: + * Changed `databricks lakehouse-monitors update` command with new required argument order. + * Added `databricks online-tables` command group. + +OpenAPI commit cdd76a98a4fca7008572b3a94427566dd286c63b (2024-02-19) +Dependency updates: + * Bump Terraform provider to v1.36.2 ([#1215](https://github.com/databricks/cli/pull/1215)). + * Bump github.com/databricks/databricks-sdk-go from 0.32.0 to 0.33.0 ([#1222](https://github.com/databricks/cli/pull/1222)). + ## 0.213.0 CLI: From 4ac1c1655b1736c00143c528cbbdba2baa190286 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 21 Feb 2024 18:36:03 +0530 Subject: [PATCH 048/286] Fix CLI nightlies on our UC workspaces (#1225) This PR fixes some test helper functions so that they work properly on our test environments for AWS and Azure UC workspaces. 
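For context, the fix boils down to treating the UC workspace environments as their underlying clouds inside the test helpers. Below is a minimal standalone sketch of that mapping; `nodeTypeFor` and the surrounding `main` are local stand-ins for illustration only, while the real helper lives in `internal/helpers.go` and is shown in the diff below.

```go
package main

import "fmt"

// Sketch of the environment-to-node-type mapping the test helpers need:
// the "ucws" environment (aws-prod-ucws) behaves like plain "aws".
func nodeTypeFor(env string) string {
	switch env {
	case "gcp":
		return "n1-standard-4"
	case "aws", "ucws":
		return "i3.xlarge"
	default:
		return "Standard_DS4_v2"
	}
}

func main() {
	fmt.Println(nodeTypeFor("ucws")) // prints: i3.xlarge
}
```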
--- internal/helpers.go | 3 ++- internal/testutil/cloud.go | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/helpers.go b/internal/helpers.go index ca5aa25e4..49dc9f4ca 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -523,7 +523,8 @@ func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { func GetNodeTypeId(env string) string { if env == "gcp" { return "n1-standard-4" - } else if env == "aws" { + } else if env == "aws" || env == "ucws" { + // aws-prod-ucws has CLOUD_ENV set to "ucws" return "i3.xlarge" } return "Standard_DS4_v2" diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go index 50bbf67f2..e547069f3 100644 --- a/internal/testutil/cloud.go +++ b/internal/testutil/cloud.go @@ -41,6 +41,9 @@ func GetCloud(t *testing.T) Cloud { return Azure case "gcp": return GCP + // CLOUD_ENV is set to "ucws" in the "aws-prod-ucws" test environment + case "ucws": + return AWS default: t.Fatalf("Unknown cloud environment: %s", env) } From 5309e0fc2a488f759010e049218d1e94fbf62949 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 21 Feb 2024 15:15:26 +0100 Subject: [PATCH 049/286] Improved error message when no .databrickscfg (#1223) ## Changes Fixes #1060 --- cmd/root/auth.go | 4 ++-- libs/databrickscfg/profiles.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/root/auth.go b/cmd/root/auth.go index a2cdd04fe..89c7641c5 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -189,7 +189,7 @@ func AskForWorkspaceProfile(ctx context.Context) (string, error) { } switch len(profiles) { case 0: - return "", fmt.Errorf("%s does not contain workspace profiles; please create one first", path) + return "", fmt.Errorf("%s does not contain workspace profiles; please create one by running 'databricks configure'", path) case 1: return profiles[0].Name, nil } @@ -222,7 +222,7 @@ func AskForAccountProfile(ctx context.Context) (string, error) { } switch len(profiles) { case 0: - return "", fmt.Errorf("%s does not contain account profiles; please create one first", path) + return "", fmt.Errorf("%s does not contain account profiles; please create one by running 'databricks configure'", path) case 1: return profiles[0].Name, nil } diff --git a/libs/databrickscfg/profiles.go b/libs/databrickscfg/profiles.go index c7bb27195..200ac9c87 100644 --- a/libs/databrickscfg/profiles.go +++ b/libs/databrickscfg/profiles.go @@ -95,7 +95,7 @@ func Get(ctx context.Context) (*config.File, error) { configFile, err := config.LoadFile(path) if errors.Is(err, fs.ErrNotExist) { // downstreams depend on ErrNoConfiguration. TODO: expose this error through SDK - return nil, fmt.Errorf("%w at %s; please create one first", ErrNoConfiguration, path) + return nil, fmt.Errorf("%w at %s; please create one by running 'databricks configure'", ErrNoConfiguration, path) } else if err != nil { return nil, err } From b65ce75c1f8de521b1b988e409d6131c771ffc33 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 21 Feb 2024 15:16:36 +0100 Subject: [PATCH 050/286] Use Go SDK Iterators when listing resources with the CLI (#1202) ## Changes Currently, when the CLI run a list API call (like list jobs), it uses the `List*All` methods from the SDK, which list all resources in the collection. This is very slow for large collections: if you need to list all jobs from a workspace that has 10,000+ jobs, you'll be waiting for at least 100 RPCs to complete before seeing any output. 
Instead of relying on the `List*All()` methods, the CLI can use the iterator data structure the SDK recently added, which allows traversing a collection without listing it completely first. New pages are fetched lazily when the next requested item belongs to the next page. Using the `List()` methods that return these iterators, the CLI can start printing part of the response before the complete collection has been fetched.

This involves a fairly major rewrite of the rendering logic in `cmdio`. The idea is to define custom rendering logic based on the type of the provided resource. There are three renderer interfaces:

1. textRenderer: supports printing something in a textual format (i.e. not JSON, and not templated).
2. jsonRenderer: supports printing something in a pretty-printed JSON format.
3. templateRenderer: supports printing something using a text template.

There are also three renderer implementations:

1. readerRenderer: supports printing a reader. This only implements the textRenderer interface.
2. iteratorRenderer: supports printing a `listing.Iterator` from the Go SDK. This implements jsonRenderer and templateRenderer, buffering 20 resources at a time before writing them to the output.
3. defaultRenderer: supports printing arbitrary resources (the previous implementation).

Callers use `cmdio.Render()` to render individual resources or an `io.Reader`, and `cmdio.RenderIterator()` to render an iterator. A separate method is needed to safely match on the type of the iterator, since Go does not allow runtime type matches on generic types with an existential type parameter.

One other change is to split the templates used for the text representation of list resources into a header template and a row template. The row template is now executed multiple times for List API calls, but the header should only be printed once. To support this, I have added `headerTemplate` to `cmdIO` and changed `RenderWithTemplate` to take a `headerTemplate` parameter everywhere.

## Tests
- [x] Unit tests for text rendering logic
- [x] Unit test for reflection-based iterator construction.
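For intuition, here is a minimal, self-contained sketch of the header/row template split and lazy row rendering described above. It is illustrative only: the `iterator` interface, `renderIterator`, and the `job` type are local stand-ins rather than the actual `cmdio` or SDK APIs, and it renders each row immediately instead of buffering 20 resources at a time as the real renderer does.

```go
package main

import (
	"os"
	"text/template"
)

// Stand-in for a paginated iterator: items are produced lazily.
// Defined locally so the sketch is self-contained; not the real listing.Iterator.
type iterator[T any] interface {
	HasNext() bool
	Next() (T, error)
}

// sliceIterator fakes a lazy iterator over an in-memory slice.
type sliceIterator[T any] struct {
	items []T
	pos   int
}

func (it *sliceIterator[T]) HasNext() bool { return it.pos < len(it.items) }

func (it *sliceIterator[T]) Next() (T, error) {
	v := it.items[it.pos]
	it.pos++
	return v, nil
}

// renderIterator executes the header template once, then the row template for
// each item as it arrives, so output appears before the collection is fully
// fetched.
func renderIterator[T any](it iterator[T], headerTmpl, rowTmpl string) error {
	header := template.Must(template.New("header").Parse(headerTmpl))
	row := template.Must(template.New("row").Parse(rowTmpl))

	if err := header.Execute(os.Stdout, nil); err != nil {
		return err
	}
	for it.HasNext() {
		item, err := it.Next()
		if err != nil {
			return err
		}
		if err := row.Execute(os.Stdout, item); err != nil {
			return err
		}
	}
	return nil
}

type job struct {
	JobId int64
	Name  string
}

func main() {
	it := &sliceIterator[job]{items: []job{
		{JobId: 1, Name: "nightly-refresh"},
		{JobId: 2, Name: "hourly-ingest"},
	}}
	_ = renderIterator[job](it, "ID\tName\n", "{{.JobId}}\t{{.Name}}\n")
}
```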
--------- Co-authored-by: Andrew Nester --- .codegen/service.go.tmpl | 12 +- bundle/schema/docs/bundle_descriptions.json | 92 +++--- cmd/account/billable-usage/billable-usage.go | 2 +- cmd/account/budgets/budgets.go | 7 +- .../custom-app-integration.go | 7 +- cmd/account/groups/groups.go | 7 +- .../ip-access-lists/ip-access-lists.go | 7 +- cmd/account/log-delivery/log-delivery.go | 7 +- .../metastore-assignments.go | 7 +- cmd/account/metastores/metastores.go | 7 +- .../network-connectivity.go | 14 +- .../o-auth-published-apps.go | 7 +- .../published-app-integration.go | 7 +- .../service-principal-secrets.go | 7 +- .../service-principals/service-principals.go | 7 +- cmd/account/users/users.go | 7 +- .../workspace-assignment.go | 7 +- cmd/fs/cat.go | 2 +- cmd/fs/cp.go | 4 +- cmd/fs/ls.go | 4 +- cmd/labs/project/proxy.go | 2 +- cmd/root/io.go | 5 +- cmd/workspace/catalogs/catalogs.go | 7 +- cmd/workspace/catalogs/overrides.go | 3 +- cmd/workspace/clean-rooms/clean-rooms.go | 7 +- .../cluster-policies/cluster-policies.go | 7 +- cmd/workspace/clusters/clusters.go | 14 +- cmd/workspace/clusters/overrides.go | 3 +- cmd/workspace/connections/connections.go | 7 +- cmd/workspace/dashboards/dashboards.go | 7 +- cmd/workspace/dashboards/overrides.go | 3 +- cmd/workspace/experiments/experiments.go | 35 +-- .../external-locations/external-locations.go | 7 +- cmd/workspace/external-locations/overrides.go | 3 +- cmd/workspace/functions/functions.go | 7 +- .../git-credentials/git-credentials.go | 7 +- .../global-init-scripts.go | 7 +- cmd/workspace/groups/groups.go | 7 +- .../instance-pools/instance-pools.go | 7 +- .../instance-profiles/instance-profiles.go | 7 +- .../ip-access-lists/ip-access-lists.go | 7 +- cmd/workspace/jobs/jobs.go | 14 +- cmd/workspace/jobs/overrides.go | 3 +- cmd/workspace/libraries/libraries.go | 7 +- cmd/workspace/metastores/metastores.go | 7 +- cmd/workspace/metastores/overrides.go | 3 +- .../model-registry/model-registry.go | 42 +-- .../model-versions/model-versions.go | 7 +- cmd/workspace/pipelines/pipelines.go | 14 +- .../policy-families/policy-families.go | 7 +- cmd/workspace/providers/providers.go | 14 +- cmd/workspace/queries/overrides.go | 3 +- cmd/workspace/queries/queries.go | 7 +- cmd/workspace/query-history/query-history.go | 7 +- cmd/workspace/recipients/recipients.go | 7 +- .../registered-models/registered-models.go | 7 +- cmd/workspace/repos/repos.go | 7 +- cmd/workspace/schemas/overrides.go | 3 +- cmd/workspace/schemas/schemas.go | 7 +- cmd/workspace/secrets/overrides.go | 6 +- cmd/workspace/secrets/secrets.go | 21 +- .../service-principals/service-principals.go | 7 +- .../serving-endpoints/serving-endpoints.go | 7 +- cmd/workspace/shares/shares.go | 7 +- .../storage-credentials/overrides.go | 3 +- .../storage-credentials.go | 7 +- .../system-schemas/system-schemas.go | 7 +- cmd/workspace/tables/overrides.go | 3 +- cmd/workspace/tables/tables.go | 14 +- cmd/workspace/token-management/overrides.go | 3 +- .../token-management/token-management.go | 7 +- cmd/workspace/tokens/overrides.go | 3 +- cmd/workspace/tokens/tokens.go | 7 +- cmd/workspace/users/users.go | 7 +- .../vector-search-endpoints.go | 7 +- .../vector-search-indexes.go | 7 +- cmd/workspace/volumes/volumes.go | 7 +- cmd/workspace/warehouses/overrides.go | 3 +- cmd/workspace/warehouses/warehouses.go | 7 +- cmd/workspace/workspace/export_dir.go | 4 +- cmd/workspace/workspace/import_dir.go | 4 +- cmd/workspace/workspace/overrides.go | 3 +- cmd/workspace/workspace/workspace.go | 7 +- internal/bundle/helpers.go 
| 2 +- libs/cmdio/io.go | 70 +---- libs/cmdio/render.go | 261 +++++++++++++++++- libs/cmdio/render_test.go | 190 +++++++++++++ .../databrickscfg/cfgpickers/clusters_test.go | 4 +- libs/template/helpers_test.go | 2 +- 89 files changed, 714 insertions(+), 519 deletions(-) create mode 100644 libs/cmdio/render_test.go diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index a0cd02198..ad25135ae 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -300,16 +300,22 @@ func init() { // end service {{.Name}}{{end}} {{- define "method-call" -}} - {{if .Response}}response, err :={{else}}err ={{end}} {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.PascalName}}{{if .Pagination}}All{{end}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}}) + {{if .Response -}} + response{{ if not .Pagination}}, err{{end}} := + {{- else -}} + err = + {{- end}} {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}}) + {{- if not (and .Response .Pagination) }} if err != nil { return err } + {{- end}} {{ if .Response -}} {{- if .IsResponseByteStream -}} defer response.{{.ResponseBodyField.PascalName}}.Close() - return cmdio.RenderReader(ctx, response.{{.ResponseBodyField.PascalName}}) + return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response.{{.ResponseBodyField.PascalName}}) {{- else -}} - return cmdio.Render(ctx, response) + return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response) {{- end -}} {{ else -}} return nil diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 5b63bb6d2..982dd4eb7 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -1,8 +1,8 @@ { - "description": "Root of the bundle config", + "description": "", "properties": { "artifacts": { - "description": "A description of all code artifacts in this bundle.", + "description": "", "additionalproperties": { "description": "", "properties": { @@ -33,7 +33,7 @@ } }, "bundle": { - "description": "The details for this bundle.", + "description": "", "properties": { "compute_id": { "description": "" @@ -58,7 +58,7 @@ } }, "name": { - "description": "The name of the bundle." + "description": "" } } }, @@ -77,7 +77,7 @@ } }, "include": { - "description": "A list of glob patterns of files to load and merge into the this configuration. Defaults to no files being included.", + "description": "", "items": { "description": "" } @@ -193,7 +193,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." } } }, @@ -322,7 +322,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." 
}, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a cluster that is created for only for this task.", "properties": { "apply_policy_default_values": { "description": "" @@ -725,7 +725,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -785,7 +785,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -930,7 +930,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a cluster that is created for only for this task.", "properties": { "apply_policy_default_values": { "description": "" @@ -1269,7 +1269,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -1371,7 +1371,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -1449,7 +1449,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -1551,7 +1551,7 @@ } }, "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." 
}, "table": { "description": "Table trigger settings.", @@ -2535,7 +2535,7 @@ "description": "", "properties": { "artifacts": { - "description": "A description of all code artifacts in this bundle.", + "description": "", "additionalproperties": { "description": "", "properties": { @@ -2566,7 +2566,7 @@ } }, "bundle": { - "description": "The details for this bundle.", + "description": "", "properties": { "compute_id": { "description": "" @@ -2591,7 +2591,7 @@ } }, "name": { - "description": "The name of the bundle." + "description": "" } } }, @@ -2726,7 +2726,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." } } }, @@ -2855,7 +2855,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a cluster that is created for only for this task.", "properties": { "apply_policy_default_values": { "description": "" @@ -3258,7 +3258,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -3318,7 +3318,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. 
It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3463,7 +3463,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a cluster that is created for only for this task.", "properties": { "apply_policy_default_values": { "description": "" @@ -3802,7 +3802,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -3904,7 +3904,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -3982,7 +3982,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the SQL file. 
When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in \u003cDatabricks\u003e workspace.\n* `GIT`: SQL file is located in cloud Git provider.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -4084,7 +4084,7 @@ } }, "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." }, "table": { "description": "Table trigger settings.", @@ -5115,10 +5115,10 @@ } }, "workspace": { - "description": "Configures which workspace to connect to and locations for files, state, and similar locations within the workspace file tree.", + "description": "", "properties": { "artifact_path": { - "description": "The remote path to synchronize build artifacts to. This defaults to `${workspace.root}/artifacts`" + "description": "" }, "auth_type": { "description": "" @@ -5127,10 +5127,10 @@ "description": "" }, "azure_environment": { - "description": "Azure environment, one of (Public, UsGov, China, Germany)." + "description": "" }, "azure_login_app_id": { - "description": "Azure Login Application ID." + "description": "" }, "azure_tenant_id": { "description": "" @@ -5139,28 +5139,28 @@ "description": "" }, "azure_workspace_resource_id": { - "description": "Azure Resource Manager ID for Azure Databricks workspace." + "description": "" }, "client_id": { "description": "" }, "file_path": { - "description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`" + "description": "" }, "google_service_account": { "description": "" }, "host": { - "description": "Host url of the workspace." + "description": "" }, "profile": { - "description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg." + "description": "" }, "root_path": { - "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`" + "description": "" }, "state_path": { - "description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`" + "description": "" } } } @@ -5220,10 +5220,10 @@ } }, "workspace": { - "description": "Configures which workspace to connect to and locations for files, state, and similar locations within the workspace file tree.", + "description": "", "properties": { "artifact_path": { - "description": "The remote path to synchronize build artifacts to. This defaults to `${workspace.root}/artifacts`" + "description": "" }, "auth_type": { "description": "" @@ -5232,10 +5232,10 @@ "description": "" }, "azure_environment": { - "description": "Azure environment, one of (Public, UsGov, China, Germany)." + "description": "" }, "azure_login_app_id": { - "description": "Azure Login Application ID." 
+ "description": "" }, "azure_tenant_id": { "description": "" @@ -5244,28 +5244,28 @@ "description": "" }, "azure_workspace_resource_id": { - "description": "Azure Resource Manager ID for Azure Databricks workspace." + "description": "" }, "client_id": { "description": "" }, "file_path": { - "description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`" + "description": "" }, "google_service_account": { "description": "" }, "host": { - "description": "Host url of the workspace." + "description": "" }, "profile": { - "description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg." + "description": "" }, "root_path": { - "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`" + "description": "" }, "state_path": { - "description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`" + "description": "" } } } diff --git a/cmd/account/billable-usage/billable-usage.go b/cmd/account/billable-usage/billable-usage.go index ec9b7a639..bbbc9af23 100755 --- a/cmd/account/billable-usage/billable-usage.go +++ b/cmd/account/billable-usage/billable-usage.go @@ -92,7 +92,7 @@ func newDownload() *cobra.Command { return err } defer response.Contents.Close() - return cmdio.RenderReader(ctx, response.Contents) + return cmdio.Render(ctx, response.Contents) } // Disable completions since they are not applicable. diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index 69237900e..dfa2f6bc4 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -281,11 +281,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Budgets.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.Budgets.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index e6d216dfc..79c0f8373 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -262,11 +262,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.CustomAppIntegration.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.CustomAppIntegration.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index ed1fa1642..a068fba45 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -314,11 +314,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Groups.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.Groups.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/account/ip-access-lists/ip-access-lists.go b/cmd/account/ip-access-lists/ip-access-lists.go index 20511265d..dd836c90a 100755 --- a/cmd/account/ip-access-lists/ip-access-lists.go +++ b/cmd/account/ip-access-lists/ip-access-lists.go @@ -339,11 +339,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.IpAccessLists.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.IpAccessLists.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index 1846e0fdc..eed8942b8 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -303,11 +303,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.LogDelivery.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.LogDelivery.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index 619bde507..b1d0508b3 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -294,11 +294,8 @@ func newList() *cobra.Command { listReq.MetastoreId = args[0] - response, err := a.MetastoreAssignments.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.MetastoreAssignments.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index 797bef5ec..e8b7c8f70 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -257,11 +257,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Metastores.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.Metastores.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index 27ab31743..bfe116f28 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -546,11 +546,8 @@ func newListNetworkConnectivityConfigurations() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.NetworkConnectivity.ListNetworkConnectivityConfigurationsAll(ctx, listNetworkConnectivityConfigurationsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.NetworkConnectivity.ListNetworkConnectivityConfigurations(ctx, listNetworkConnectivityConfigurationsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
@@ -612,11 +609,8 @@ func newListPrivateEndpointRules() *cobra.Command { listPrivateEndpointRulesReq.NetworkConnectivityConfigId = args[0] - response, err := a.NetworkConnectivity.ListPrivateEndpointRulesAll(ctx, listPrivateEndpointRulesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.NetworkConnectivity.ListPrivateEndpointRules(ctx, listPrivateEndpointRulesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/o-auth-published-apps/o-auth-published-apps.go b/cmd/account/o-auth-published-apps/o-auth-published-apps.go index b611724d4..1ce363ac9 100755 --- a/cmd/account/o-auth-published-apps/o-auth-published-apps.go +++ b/cmd/account/o-auth-published-apps/o-auth-published-apps.go @@ -72,11 +72,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.OAuthPublishedApps.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.OAuthPublishedApps.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index d3209c670..54cf63371 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -262,11 +262,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.PublishedAppIntegration.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.PublishedAppIntegration.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index 19d6a491d..1a646e25c 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -226,11 +226,8 @@ func newList() *cobra.Command { return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) } - response, err := a.ServicePrincipalSecrets.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.ServicePrincipalSecrets.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index 80f1bf461..af18d5341 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -313,11 +313,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.ServicePrincipals.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.ServicePrincipals.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
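Every list-style command in this patch gets the same mechanical rewrite: the SDK's eager `ListAll`/`...All` helpers, which collect every page into a slice before anything is printed, are replaced by the paginated `List` variants that return an iterator, and the result is handed to the new `cmdio.RenderIterator` instead of `cmdio.Render`. A sketch of a resulting command, assembled from calls that appear verbatim in the hunks; the import paths are the ones used elsewhere in this repository and should be treated as assumptions:

package clusters

import (
	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/spf13/cobra"
)

func newList() *cobra.Command {
	var listReq compute.ListClustersRequest
	cmd := &cobra.Command{Use: "list"}

	cmd.RunE = func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()
		w := root.WorkspaceClient(ctx)
		// Before this patch: response, err := w.Clusters.ListAll(ctx, listReq)
		// materialized the full collection and handed it to cmdio.Render.
		response := w.Clusters.List(ctx, listReq)
		// RenderIterator consumes the paginated iterator and emits results
		// page by page instead of buffering everything up front.
		return cmdio.RenderIterator(ctx, response)
	}
	return cmd
}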
diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 551766e88..f5b81f219 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -329,11 +329,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Users.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.Users.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index 7780d90f4..ab82cd39f 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -219,11 +219,8 @@ func newList() *cobra.Command { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) } - response, err := a.WorkspaceAssignment.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.WorkspaceAssignment.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/fs/cat.go b/cmd/fs/cat.go index be1866538..df94d1d73 100644 --- a/cmd/fs/cat.go +++ b/cmd/fs/cat.go @@ -27,7 +27,7 @@ func newCatCommand() *cobra.Command { if err != nil { return err } - return cmdio.RenderReader(ctx, r) + return cmdio.Render(ctx, r) } return cmd diff --git a/cmd/fs/cp.go b/cmd/fs/cp.go index f0f480fec..1ba0daf0c 100644 --- a/cmd/fs/cp.go +++ b/cmd/fs/cp.go @@ -107,7 +107,7 @@ func (c *copy) emitFileSkippedEvent(sourcePath, targetPath string) error { event := newFileSkippedEvent(fullSourcePath, fullTargetPath) template := "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n" - return cmdio.RenderWithTemplate(c.ctx, event, template) + return cmdio.RenderWithTemplate(c.ctx, event, "", template) } func (c *copy) emitFileCopiedEvent(sourcePath, targetPath string) error { @@ -123,7 +123,7 @@ func (c *copy) emitFileCopiedEvent(sourcePath, targetPath string) error { event := newFileCopiedEvent(fullSourcePath, fullTargetPath) template := "{{.SourcePath}} -> {{.TargetPath}}\n" - return cmdio.RenderWithTemplate(c.ctx, event, template) + return cmdio.RenderWithTemplate(c.ctx, event, "", template) } func newCpCommand() *cobra.Command { diff --git a/cmd/fs/ls.go b/cmd/fs/ls.go index be52b9289..1d9ee876a 100644 --- a/cmd/fs/ls.go +++ b/cmd/fs/ls.go @@ -78,12 +78,12 @@ func newLsCommand() *cobra.Command { // Use template for long mode if the flag is set if long { - return cmdio.RenderWithTemplate(ctx, jsonDirEntries, cmdio.Heredoc(` + return cmdio.RenderWithTemplate(ctx, jsonDirEntries, "", cmdio.Heredoc(` {{range .}}{{if .IsDir}}DIRECTORY {{else}}FILE {{end}}{{.Size}} {{.ModTime|pretty_date}} {{.Name}} {{end}} `)) } - return cmdio.RenderWithTemplate(ctx, jsonDirEntries, cmdio.Heredoc(` + return cmdio.RenderWithTemplate(ctx, jsonDirEntries, "", cmdio.Heredoc(` {{range .}}{{.Name}} {{end}} `)) diff --git a/cmd/labs/project/proxy.go b/cmd/labs/project/proxy.go index d872560a5..ee1b0aa91 100644 --- a/cmd/labs/project/proxy.go +++ b/cmd/labs/project/proxy.go @@ -87,7 +87,7 @@ func (cp *proxy) renderJsonAsTable(cmd *cobra.Command, args []string, envs map[s } // IntelliJ eagerly replaces tabs with spaces, even though we're not asking for it fixedTemplate := strings.ReplaceAll(cp.TableTemplate, "\\t", "\t") - return cmdio.RenderWithTemplate(ctx, anyVal, 
fixedTemplate) + return cmdio.RenderWithTemplate(ctx, anyVal, "", fixedTemplate) } func (cp *proxy) commandInput(cmd *cobra.Command) ([]string, error) { diff --git a/cmd/root/io.go b/cmd/root/io.go index 23c7d6c64..b224bbb27 100644 --- a/cmd/root/io.go +++ b/cmd/root/io.go @@ -38,13 +38,14 @@ func OutputType(cmd *cobra.Command) flags.Output { } func (f *outputFlag) initializeIO(cmd *cobra.Command) error { - var template string + var headerTemplate, template string if cmd.Annotations != nil { // rely on zeroval being an empty string template = cmd.Annotations["template"] + headerTemplate = cmd.Annotations["headerTemplate"] } - cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), template) + cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template) ctx := cmdio.InContext(cmd.Context(), cmdIO) cmd.SetContext(ctx) return nil diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 6ffe4a395..8e639023f 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -292,11 +292,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Catalogs.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Catalogs.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/catalogs/overrides.go b/cmd/workspace/catalogs/overrides.go index 6de7a7771..9ab1bf052 100644 --- a/cmd/workspace/catalogs/overrides.go +++ b/cmd/workspace/catalogs/overrides.go @@ -6,8 +6,9 @@ import ( ) func listOverride(listCmd *cobra.Command) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Name"}} {{header "Type"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Name"}} {{header "Type"}} {{header "Comment"}} {{range .}}{{.Name|green}} {{blue "%s" .CatalogType}} {{.Comment}} {{end}}`) } diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index cac5de34c..4cee2ce6c 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -282,11 +282,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.CleanRooms.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.CleanRooms.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 65f1af57b..f6edee2b3 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -603,11 +603,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ClusterPolicies.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ClusterPolicies.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
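`cmdio.RenderWithTemplate` and `cmdio.NewIO` both gain a header-template parameter: `initializeIO` above reads it from the new `headerTemplate` command annotation, and call sites without a header (the `fs cp`/`fs ls` events above and the workspace import/export events later in this patch) pass an empty string in that position. A small sketch of the call shape, assuming the `newFileCopiedEvent` helper from `cmd/fs/cp.go`; imports are elided:

func emitFileCopied(ctx context.Context, src, dst string) error {
	// No header for single-event output, so the header template is "".
	// The final argument is the row template, unchanged from before.
	event := newFileCopiedEvent(src, dst)
	return cmdio.RenderWithTemplate(ctx, event, "", "{{.SourcePath}} -> {{.TargetPath}}\n")
}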
diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index b009a1f59..cf35b2837 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -653,11 +653,8 @@ func newEvents() *cobra.Command { eventsReq.ClusterId = args[0] } - response, err := w.Clusters.EventsAll(ctx, eventsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Clusters.Events(ctx, eventsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -957,11 +954,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Clusters.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Clusters.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/clusters/overrides.go b/cmd/workspace/clusters/overrides.go index ab32a4cd8..55976d406 100644 --- a/cmd/workspace/clusters/overrides.go +++ b/cmd/workspace/clusters/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, _ *compute.ListClustersRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{header "State"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{header "State"}} {{range .}}{{.ClusterId | green}} {{.ClusterName | cyan}} {{if eq .State "RUNNING"}}{{green "%s" .State}}{{else if eq .State "TERMINATED"}}{{red "%s" .State}}{{else}}{{blue "%s" .State}}{{end}} {{end}}`) } diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index e28004c0d..f740c7789 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -293,11 +293,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Connections.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Connections.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 34bbb28b4..e07f73926 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -293,11 +293,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Dashboards.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Dashboards.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
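The override files are all touched the same way: the column header moves out of the `template` annotation into a separate `headerTemplate` annotation. The reason follows from the iterator rendering introduced at the end of this patch: the row template is now executed once per buffered page of results, so a header embedded in it would be repeated on every flush, whereas the dedicated header template lets the renderer print it a single time up front. A sketch of an override after the change, simplified from the clusters hunk above (the colored state conditional is omitted):

func listOverride(listCmd *cobra.Command, _ *compute.ListClustersRequest) {
	// Printed once, before the first page of results.
	listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
	{{header "ID"}}	{{header "Name"}}	{{header "State"}}`)
	// Executed for every buffered page; it must not contain the header.
	listCmd.Annotations["template"] = cmdio.Heredoc(`
	{{range .}}{{.ClusterId | green}}	{{.ClusterName | cyan}}	{{.State}}
	{{end}}`)
}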
diff --git a/cmd/workspace/dashboards/overrides.go b/cmd/workspace/dashboards/overrides.go index 709e657f8..6a26ebbfb 100644 --- a/cmd/workspace/dashboards/overrides.go +++ b/cmd/workspace/dashboards/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, _ *sql.ListDashboardsRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{range .}}{{.Id|green}} {{.Name}} {{end}}`) } diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 7bd28938c..368ec7f94 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -733,11 +733,8 @@ func newGetHistory() *cobra.Command { getHistoryReq.MetricKey = args[0] - response, err := w.Experiments.GetHistoryAll(ctx, getHistoryReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.GetHistory(ctx, getHistoryReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -998,11 +995,8 @@ func newListArtifacts() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Experiments.ListArtifactsAll(ctx, listArtifactsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.ListArtifacts(ctx, listArtifactsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1061,11 +1055,8 @@ func newListExperiments() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Experiments.ListExperimentsAll(ctx, listExperimentsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.ListExperiments(ctx, listExperimentsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1842,11 +1833,8 @@ func newSearchExperiments() *cobra.Command { } } - response, err := w.Experiments.SearchExperimentsAll(ctx, searchExperimentsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.SearchExperiments(ctx, searchExperimentsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1919,11 +1907,8 @@ func newSearchRuns() *cobra.Command { } } - response, err := w.Experiments.SearchRunsAll(ctx, searchRunsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.SearchRuns(ctx, searchRunsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index b4166086d..7ddc0d842 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -319,11 +319,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ExternalLocations.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ExternalLocations.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/external-locations/overrides.go b/cmd/workspace/external-locations/overrides.go index 63a30cfc3..00b4921d4 100644 --- a/cmd/workspace/external-locations/overrides.go +++ b/cmd/workspace/external-locations/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *catalog.ListExternalLocationsRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Name"}} {{header "Credential"}} {{header "URL"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Name"}} {{header "Credential"}} {{header "URL"}} {{range .}}{{.Name|green}} {{.CredentialName|cyan}} {{.Url}} {{end}}`) } diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 35356be0f..d1db1ec97 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -327,11 +327,8 @@ func newList() *cobra.Command { listReq.CatalogName = args[0] listReq.SchemaName = args[1] - response, err := w.Functions.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Functions.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index ca256564c..8984a9538 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -311,11 +311,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.GitCredentials.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.GitCredentials.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index c40b6785a..de08614fe 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -309,11 +309,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.GlobalInitScripts.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.GlobalInitScripts.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index 588bce316..aba54b8be 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -314,11 +314,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Groups.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Groups.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 968f64bc6..c9389fef8 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -602,11 +602,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.InstancePools.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.InstancePools.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/instance-profiles/instance-profiles.go b/cmd/workspace/instance-profiles/instance-profiles.go index ca78a15f2..2077c4bfc 100755 --- a/cmd/workspace/instance-profiles/instance-profiles.go +++ b/cmd/workspace/instance-profiles/instance-profiles.go @@ -251,11 +251,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.InstanceProfiles.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.InstanceProfiles.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index 5bba8b51d..9eb08cb43 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -340,11 +340,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.IpAccessLists.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.IpAccessLists.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 634a7f399..957aa6093 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -1042,11 +1042,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Jobs.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Jobs.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1112,11 +1109,8 @@ func newListRuns() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Jobs.ListRunsAll(ctx, listRunsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Jobs.ListRuns(ctx, listRunsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/jobs/overrides.go b/cmd/workspace/jobs/overrides.go index fd22dcbdb..ee7d20551 100644 --- a/cmd/workspace/jobs/overrides.go +++ b/cmd/workspace/jobs/overrides.go @@ -13,8 +13,9 @@ func listOverride(listCmd *cobra.Command, listReq *jobs.ListJobsRequest) { } func listRunsOverride(listRunsCmd *cobra.Command, listRunsReq *jobs.ListRunsRequest) { + listRunsCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Job ID"}} {{header "Run ID"}} {{header "Result State"}} URL`) listRunsCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Job ID"}} {{header "Run ID"}} {{header "Result State"}} URL {{range .}}{{green "%d" .JobId}} {{cyan "%d" .RunId}} {{if eq .State.ResultState "SUCCESS"}}{{"SUCCESS"|green}}{{else}}{{red "%s" .State.ResultState}}{{end}} {{.RunPageUrl}} {{end}}`) } diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index 1e742892d..fef81c25f 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -157,11 +157,8 @@ func newClusterStatus() *cobra.Command { clusterStatusReq.ClusterId = args[0] - response, err := w.Libraries.ClusterStatusAll(ctx, clusterStatusReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Libraries.ClusterStatus(ctx, clusterStatusReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index fdd0d1c08..d63576d4e 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -455,11 +455,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Metastores.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Metastores.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/metastores/overrides.go b/cmd/workspace/metastores/overrides.go index 2c9ca6f79..3ee6a1071 100644 --- a/cmd/workspace/metastores/overrides.go +++ b/cmd/workspace/metastores/overrides.go @@ -6,8 +6,9 @@ import ( ) func listOverride(listCmd *cobra.Command) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{"Region"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{"Region"}} {{range .}}{{.MetastoreId|green}} {{.Name|cyan}} {{.Region}} {{end}}`) } diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index fade898ec..9c6034b56 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -1128,11 +1128,8 @@ func newGetLatestVersions() *cobra.Command { getLatestVersionsReq.Name = args[0] } - response, err := w.ModelRegistry.GetLatestVersionsAll(ctx, getLatestVersionsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.GetLatestVersions(ctx, getLatestVersionsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
@@ -1520,11 +1517,8 @@ func newListModels() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ModelRegistry.ListModelsAll(ctx, listModelsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.ListModels(ctx, listModelsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1586,11 +1580,8 @@ func newListTransitionRequests() *cobra.Command { listTransitionRequestsReq.Name = args[0] listTransitionRequestsReq.Version = args[1] - response, err := w.ModelRegistry.ListTransitionRequestsAll(ctx, listTransitionRequestsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.ListTransitionRequests(ctx, listTransitionRequestsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1651,11 +1642,8 @@ func newListWebhooks() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ModelRegistry.ListWebhooksAll(ctx, listWebhooksReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.ListWebhooks(ctx, listWebhooksReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1900,11 +1888,8 @@ func newSearchModelVersions() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ModelRegistry.SearchModelVersionsAll(ctx, searchModelVersionsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.SearchModelVersions(ctx, searchModelVersionsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1964,11 +1949,8 @@ func newSearchModels() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ModelRegistry.SearchModelsAll(ctx, searchModelsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.SearchModels(ctx, searchModelsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index 97438264e..b4492cb36 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -315,11 +315,8 @@ func newList() *cobra.Command { listReq.FullName = args[0] - response, err := w.ModelVersions.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelVersions.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index ad54b6b10..4c2db6aa3 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -536,11 +536,8 @@ func newListPipelineEvents() *cobra.Command { } listPipelineEventsReq.PipelineId = args[0] - response, err := w.Pipelines.ListPipelineEventsAll(ctx, listPipelineEventsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Pipelines.ListPipelineEvents(ctx, listPipelineEventsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
@@ -600,11 +597,8 @@ func newListPipelines() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Pipelines.ListPipelinesAll(ctx, listPipelinesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Pipelines.ListPipelines(ctx, listPipelinesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/policy-families/policy-families.go b/cmd/workspace/policy-families/policy-families.go index 75ab862a7..c81d2e92c 100755 --- a/cmd/workspace/policy-families/policy-families.go +++ b/cmd/workspace/policy-families/policy-families.go @@ -138,11 +138,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.PolicyFamilies.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.PolicyFamilies.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 851c668a7..255296488 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -323,11 +323,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Providers.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Providers.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -401,11 +398,8 @@ func newListShares() *cobra.Command { } listSharesReq.Name = args[0] - response, err := w.Providers.ListSharesAll(ctx, listSharesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Providers.ListShares(ctx, listSharesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/queries/overrides.go b/cmd/workspace/queries/overrides.go index a06dabdeb..d7edf93a0 100644 --- a/cmd/workspace/queries/overrides.go +++ b/cmd/workspace/queries/overrides.go @@ -8,8 +8,9 @@ import ( func listOverride(listCmd *cobra.Command, listReq *sql.ListQueriesRequest) { // TODO: figure out colored/non-colored headers and colspan shifts + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{header "Author"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{header "Author"}} {{range .}}{{.Id|green}} {{.Name|cyan}} {{.User.Email|cyan}} {{end}}`) } diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index c4349213e..ef2de4466 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -303,11 +303,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Queries.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Queries.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/query-history/query-history.go b/cmd/workspace/query-history/query-history.go index 337ab4033..847461058 100755 --- a/cmd/workspace/query-history/query-history.go +++ b/cmd/workspace/query-history/query-history.go @@ -73,11 +73,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.QueryHistory.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.QueryHistory.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 463d7985c..d7d432b9c 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -342,11 +342,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Recipients.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Recipients.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index b506e180a..98aec3bb3 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -450,11 +450,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.RegisteredModels.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.RegisteredModels.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 62f637502..0c38183aa 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -485,11 +485,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Repos.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Repos.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/schemas/overrides.go b/cmd/workspace/schemas/overrides.go index 180690b6e..ba4c65ce7 100644 --- a/cmd/workspace/schemas/overrides.go +++ b/cmd/workspace/schemas/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *catalog.ListSchemasRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Full Name"}} {{header "Owner"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Full Name"}} {{header "Owner"}} {{header "Comment"}} {{range .}}{{.FullName|green}} {{.Owner|cyan}} {{.Comment}} {{end}}`) } diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index fc496467e..ebdab2ab5 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -333,11 +333,8 @@ func newList() *cobra.Command { listReq.CatalogName = args[0] - response, err := w.Schemas.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Schemas.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/secrets/overrides.go b/cmd/workspace/secrets/overrides.go index 6e765bf73..b215f17a7 100644 --- a/cmd/workspace/secrets/overrides.go +++ b/cmd/workspace/secrets/overrides.go @@ -11,15 +11,17 @@ func cmdOverride(cmd *cobra.Command) { } func listScopesOverride(listScopesCmd *cobra.Command) { + listScopesCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Scope"}} {{header "Backend Type"}}`) listScopesCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Scope"}} {{header "Backend Type"}} {{range .}}{{.Name|green}} {{.BackendType}} {{end}}`) } func listSecretsOverride(listSecretsCommand *cobra.Command, _ *workspace.ListSecretsRequest) { + listSecretsCommand.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Key"}} {{header "Last Updated Timestamp"}}`) listSecretsCommand.Annotations["template"] = cmdio.Heredoc(` - {{header "Key"}} {{header "Last Updated Timestamp"}} {{range .}}{{.Key|green}} {{.LastUpdatedTimestamp}} {{end}}`) } diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 270538b00..ec6423d06 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -590,11 +590,8 @@ func newListAcls() *cobra.Command { listAclsReq.Scope = args[0] - response, err := w.Secrets.ListAclsAll(ctx, listAclsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Secrets.ListAcls(ctx, listAclsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -641,11 +638,8 @@ func newListScopes() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Secrets.ListScopesAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Secrets.ListScopes(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -712,11 +706,8 @@ func newListSecrets() *cobra.Command { listSecretsReq.Scope = args[0] - response, err := w.Secrets.ListSecretsAll(ctx, listSecretsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Secrets.ListSecrets(ctx, listSecretsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index 5e66804d1..353c08761 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -313,11 +313,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ServicePrincipals.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ServicePrincipals.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 8c488d093..9424c5e4e 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -543,11 +543,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ServingEndpoints.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ServingEndpoints.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index 7cb85abfb..2c0479a0a 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -281,11 +281,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Shares.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Shares.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/storage-credentials/overrides.go b/cmd/workspace/storage-credentials/overrides.go index 534e045dd..92dec91eb 100644 --- a/cmd/workspace/storage-credentials/overrides.go +++ b/cmd/workspace/storage-credentials/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *catalog.ListStorageCredentialsRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{header "Credentials"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{header "Credentials"}} {{range .}}{{.Id|green}} {{.Name|cyan}} {{if .AwsIamRole}}{{.AwsIamRole.RoleArn}}{{end}}{{if .AzureServicePrincipal}}{{.AzureServicePrincipal.ApplicationId}}{{end}}{{if .DatabricksGcpServiceAccount}}{{.DatabricksGcpServiceAccount.Email}}{{end}} {{end}}`) } diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 910d2b5df..4a0d8f309 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -336,11 +336,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.StorageCredentials.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.StorageCredentials.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go index 6dbad5a3f..9b2392a6e 100755 --- a/cmd/workspace/system-schemas/system-schemas.go +++ b/cmd/workspace/system-schemas/system-schemas.go @@ -216,11 +216,8 @@ func newList() *cobra.Command { listReq.MetastoreId = args[0] - response, err := w.SystemSchemas.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.SystemSchemas.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/tables/overrides.go b/cmd/workspace/tables/overrides.go index 35fc351a4..a0849ada7 100644 --- a/cmd/workspace/tables/overrides.go +++ b/cmd/workspace/tables/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *catalog.ListTablesRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Full Name"}} {{header "Table Type"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Full Name"}} {{header "Table Type"}} {{range .}}{{.FullName|green}} {{blue "%s" .TableType}} {{end}}`) } diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index 0dfae0fef..d4e76587d 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -342,11 +342,8 @@ func newList() *cobra.Command { listReq.CatalogName = args[0] listReq.SchemaName = args[1] - response, err := w.Tables.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Tables.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -433,11 +430,8 @@ func newListSummaries() *cobra.Command { } listSummariesReq.CatalogName = args[0] - response, err := w.Tables.ListSummariesAll(ctx, listSummariesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Tables.ListSummaries(ctx, listSummariesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/token-management/overrides.go b/cmd/workspace/token-management/overrides.go index 46967d37a..8122c1a1b 100644 --- a/cmd/workspace/token-management/overrides.go +++ b/cmd/workspace/token-management/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *settings.ListTokenManagementRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Created By"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Created By"}} {{header "Comment"}} {{range .}}{{.TokenId|green}} {{.CreatedByUsername|cyan}} {{.Comment|cyan}} {{end}}`) } diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index 276de6a8e..1c2e2c37c 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -422,11 +422,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.TokenManagement.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.TokenManagement.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/tokens/overrides.go b/cmd/workspace/tokens/overrides.go index 09c51758e..142902da4 100644 --- a/cmd/workspace/tokens/overrides.go +++ b/cmd/workspace/tokens/overrides.go @@ -6,8 +6,9 @@ import ( ) func listOverride(listCmd *cobra.Command) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Expiry time"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Expiry time"}} {{header "Comment"}} {{range .}}{{.TokenId|green}} {{cyan "%d" .ExpiryTime}} {{.Comment|cyan}} {{end}}`) } diff --git a/cmd/workspace/tokens/tokens.go b/cmd/workspace/tokens/tokens.go index cd82ef63f..5550acfa5 100755 --- a/cmd/workspace/tokens/tokens.go +++ b/cmd/workspace/tokens/tokens.go @@ -232,11 +232,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Tokens.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Tokens.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 4cc485e96..078a712e4 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -426,11 +426,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Users.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Users.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index d429267ad..d6863b660 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -308,11 +308,8 @@ func newListEndpoints() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.VectorSearchEndpoints.ListEndpointsAll(ctx, listEndpointsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.VectorSearchEndpoints.ListEndpoints(ctx, listEndpointsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 0d3277f2a..6beca7d21 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -389,11 +389,8 @@ func newListIndexes() *cobra.Command { listIndexesReq.EndpointName = args[0] - response, err := w.VectorSearchIndexes.ListIndexesAll(ctx, listIndexesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.VectorSearchIndexes.ListIndexes(ctx, listIndexesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 1944237c0..12cafeaf8 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -292,11 +292,8 @@ func newList() *cobra.Command { listReq.CatalogName = args[0] listReq.SchemaName = args[1] - response, err := w.Volumes.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Volumes.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/warehouses/overrides.go b/cmd/workspace/warehouses/overrides.go index 0714937c2..9457557d0 100644 --- a/cmd/workspace/warehouses/overrides.go +++ b/cmd/workspace/warehouses/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *sql.ListWarehousesRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{header "Size"}} {{header "State"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{header "Size"}} {{header "State"}} {{range .}}{{.Id|green}} {{.Name|cyan}} {{.ClusterSize|cyan}} {{if eq .State "RUNNING"}}{{"RUNNING"|green}}{{else if eq .State "STOPPED"}}{{"STOPPED"|red}}{{else}}{{blue "%s" .State}}{{end}} {{end}}`) } diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index c64788b89..2e9282a85 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -661,11 +661,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Warehouses.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Warehouses.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/workspace/export_dir.go b/cmd/workspace/workspace/export_dir.go index d2a86d009..79e64e8ad 100644 --- a/cmd/workspace/workspace/export_dir.go +++ b/cmd/workspace/workspace/export_dir.go @@ -55,7 +55,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer. // If a file exists, and overwrite is not set, we skip exporting the file if _, err := os.Stat(targetPath); err == nil && !overwrite { // Log event that this file/directory has been skipped - return cmdio.RenderWithTemplate(ctx, newFileSkippedEvent(relPath, targetPath), "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n") + return cmdio.RenderWithTemplate(ctx, newFileSkippedEvent(relPath, targetPath), "", "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n") } // create the file @@ -74,7 +74,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer. if err != nil { return err } - return cmdio.RenderWithTemplate(ctx, newFileExportedEvent(sourcePath, targetPath), "{{.SourcePath}} -> {{.TargetPath}}\n") + return cmdio.RenderWithTemplate(ctx, newFileExportedEvent(sourcePath, targetPath), "", "{{.SourcePath}} -> {{.TargetPath}}\n") } } diff --git a/cmd/workspace/workspace/import_dir.go b/cmd/workspace/workspace/import_dir.go index bc0b80667..6ce5f3c2b 100644 --- a/cmd/workspace/workspace/import_dir.go +++ b/cmd/workspace/workspace/import_dir.go @@ -93,14 +93,14 @@ func (opts importDirOptions) callback(ctx context.Context, workspaceFiler filer. 
// Emit file skipped event with the appropriate template fileSkippedEvent := newFileSkippedEvent(localName, path.Join(targetDir, remoteName)) template := "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n" - return cmdio.RenderWithTemplate(ctx, fileSkippedEvent, template) + return cmdio.RenderWithTemplate(ctx, fileSkippedEvent, "", template) } if err != nil { return err } } fileImportedEvent := newFileImportedEvent(localName, path.Join(targetDir, remoteName)) - return cmdio.RenderWithTemplate(ctx, fileImportedEvent, "{{.SourcePath}} -> {{.TargetPath}}\n") + return cmdio.RenderWithTemplate(ctx, fileImportedEvent, "", "{{.SourcePath}} -> {{.TargetPath}}\n") } } diff --git a/cmd/workspace/workspace/overrides.go b/cmd/workspace/workspace/overrides.go index 1cac67419..cfed0a6ee 100644 --- a/cmd/workspace/workspace/overrides.go +++ b/cmd/workspace/workspace/overrides.go @@ -17,8 +17,9 @@ import ( func listOverride(listCmd *cobra.Command, listReq *workspace.ListWorkspaceRequest) { listReq.Path = "/" + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Type"}} {{header "Language"}} {{header "Path"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Type"}} {{header "Language"}} {{header "Path"}} {{range .}}{{green "%d" .ObjectId}} {{blue "%s" .ObjectType}} {{cyan "%s" .Language}} {{.Path|cyan}} {{end}}`) } diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 5777f22fe..4fb63f0c0 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -577,11 +577,8 @@ func newList() *cobra.Command { listReq.Path = args[0] - response, err := w.Workspace.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Workspace.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 2c2b2dac9..a8fbd230e 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -25,7 +25,7 @@ func initTestTemplate(t *testing.T, ctx context.Context, templateName string, co } ctx = root.SetWorkspaceClient(ctx, nil) - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "bundles") + cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") ctx = cmdio.InContext(ctx, cmd) err = template.Materialize(ctx, configFilePath, templateRoot, bundleRoot) diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index d20991a7c..75c0c4b87 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -22,27 +22,29 @@ import ( type cmdIO struct { // states if we are in the interactive mode // e.g. if stdout is a terminal - interactive bool - outputFormat flags.Output - template string - in io.Reader - out io.Writer - err io.Writer + interactive bool + outputFormat flags.Output + headerTemplate string + template string + in io.Reader + out io.Writer + err io.Writer } -func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, template string) *cmdIO { +func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, headerTemplate, template string) *cmdIO { // The check below is similar to color.NoColor but uses the specified err writer. 
dumb := os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb" if f, ok := err.(*os.File); ok && !dumb { dumb = !isatty.IsTerminal(f.Fd()) && !isatty.IsCygwinTerminal(f.Fd()) } return &cmdIO{ - interactive: !dumb, - outputFormat: outputFormat, - template: template, - in: in, - out: out, - err: err, + interactive: !dumb, + outputFormat: outputFormat, + headerTemplate: headerTemplate, + template: template, + in: in, + out: out, + err: err, } } @@ -113,48 +115,6 @@ func IsGitBash(ctx context.Context) bool { return false } -func Render(ctx context.Context, v any) error { - c := fromContext(ctx) - return RenderWithTemplate(ctx, v, c.template) -} - -func RenderWithTemplate(ctx context.Context, v any, template string) error { - // TODO: add terminal width & white/dark theme detection - c := fromContext(ctx) - switch c.outputFormat { - case flags.OutputJSON: - return renderJson(c.out, v) - case flags.OutputText: - if template != "" { - return renderTemplate(c.out, template, v) - } - return renderJson(c.out, v) - default: - return fmt.Errorf("invalid output format: %s", c.outputFormat) - } -} - -func RenderJson(ctx context.Context, v any) error { - c := fromContext(ctx) - if c.outputFormat == flags.OutputJSON { - return renderJson(c.out, v) - } - return nil -} - -func RenderReader(ctx context.Context, r io.Reader) error { - c := fromContext(ctx) - switch c.outputFormat { - case flags.OutputJSON: - return fmt.Errorf("json output not supported") - case flags.OutputText: - _, err := io.Copy(c.out, r) - return err - default: - return fmt.Errorf("invalid output format: %s", c.outputFormat) - } -} - type Tuple struct{ Name, Id string } func (c *cmdIO) Select(items []Tuple, label string) (id string, err error) { diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index d641f61df..40cdde354 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -2,14 +2,19 @@ package cmdio import ( "bytes" + "context" "encoding/base64" "encoding/json" + "errors" + "fmt" "io" "strings" "text/tabwriter" "text/template" "time" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/listing" "github.com/fatih/color" "github.com/nwidger/jsoncolor" ) @@ -46,8 +51,123 @@ func Heredoc(tmpl string) (trimmed string) { return strings.TrimSpace(trimmed) } -func renderJson(w io.Writer, v any) error { - pretty, err := fancyJSON(v) +// writeFlusher represents a buffered writer that can be flushed. This is useful when +// buffering writing a large number of resources (such as during a list API). +type writeFlusher interface { + io.Writer + Flush() error +} + +type jsonRenderer interface { + // Render an object as JSON to the provided writeFlusher. + renderJson(context.Context, writeFlusher) error +} + +type textRenderer interface { + // Render an object as text to the provided writeFlusher. + renderText(context.Context, io.Writer) error +} + +type templateRenderer interface { + // Render an object using the provided template and write to the provided tabwriter.Writer. 
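+	// Note (editor): the header template, if any, is parsed and executed by the caller before the body template is rendered here.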
+ renderTemplate(context.Context, *template.Template, *tabwriter.Writer) error +} + +type readerRenderer struct { + reader io.Reader +} + +func (r readerRenderer) renderText(_ context.Context, w io.Writer) error { + _, err := io.Copy(w, r.reader) + return err +} + +type iteratorRenderer[T any] struct { + t listing.Iterator[T] + bufferSize int +} + +func (ir iteratorRenderer[T]) getBufferSize() int { + if ir.bufferSize == 0 { + return 20 + } + return ir.bufferSize +} + +func (ir iteratorRenderer[T]) renderJson(ctx context.Context, w writeFlusher) error { + // Iterators are always rendered as a list of resources in JSON. + _, err := w.Write([]byte("[\n ")) + if err != nil { + return err + } + for i := 0; ir.t.HasNext(ctx); i++ { + if i != 0 { + _, err = w.Write([]byte(",\n ")) + if err != nil { + return err + } + } + n, err := ir.t.Next(ctx) + if err != nil { + return err + } + res, err := json.MarshalIndent(n, " ", " ") + if err != nil { + return err + } + _, err = w.Write(res) + if err != nil { + return err + } + if (i+1)%ir.getBufferSize() == 0 { + err = w.Flush() + if err != nil { + return err + } + } + } + _, err = w.Write([]byte("\n]\n")) + if err != nil { + return err + } + return w.Flush() +} + +func (ir iteratorRenderer[T]) renderTemplate(ctx context.Context, t *template.Template, w *tabwriter.Writer) error { + buf := make([]any, 0, ir.getBufferSize()) + for i := 0; ir.t.HasNext(ctx); i++ { + n, err := ir.t.Next(ctx) + if err != nil { + return err + } + buf = append(buf, n) + if len(buf) == cap(buf) { + err = t.Execute(w, buf) + if err != nil { + return err + } + err = w.Flush() + if err != nil { + return err + } + buf = buf[:0] + } + } + if len(buf) > 0 { + err := t.Execute(w, buf) + if err != nil { + return err + } + } + return w.Flush() +} + +type defaultRenderer struct { + t any +} + +func (d defaultRenderer) renderJson(_ context.Context, w writeFlusher) error { + pretty, err := fancyJSON(d.t) if err != nil { return err } @@ -56,12 +176,126 @@ func renderJson(w io.Writer, v any) error { return err } _, err = w.Write([]byte("\n")) - return err + if err != nil { + return err + } + return w.Flush() } -func renderTemplate(w io.Writer, tmpl string, v any) error { +func (d defaultRenderer) renderTemplate(_ context.Context, t *template.Template, w *tabwriter.Writer) error { + return t.Execute(w, d.t) +} + +// Returns something implementing one of the following interfaces: +// - jsonRenderer +// - textRenderer +// - templateRenderer +func newRenderer(t any) any { + if r, ok := t.(io.Reader); ok { + return readerRenderer{reader: r} + } + return defaultRenderer{t: t} +} + +func newIteratorRenderer[T any](i listing.Iterator[T]) iteratorRenderer[T] { + return iteratorRenderer[T]{t: i} +} + +type bufferedFlusher struct { + w io.Writer + b *bytes.Buffer +} + +func (b bufferedFlusher) Write(bs []byte) (int, error) { + return b.b.Write(bs) +} + +func (b bufferedFlusher) Flush() error { + _, err := b.w.Write(b.b.Bytes()) + if err != nil { + return err + } + b.b.Reset() + return nil +} + +func newBufferedFlusher(w io.Writer) writeFlusher { + return bufferedFlusher{ + w: w, + b: &bytes.Buffer{}, + } +} + +func renderWithTemplate(r any, ctx context.Context, outputFormat flags.Output, w io.Writer, headerTemplate, template string) error { + // TODO: add terminal width & white/dark theme detection + switch outputFormat { + case flags.OutputJSON: + if jr, ok := r.(jsonRenderer); ok { + return jr.renderJson(ctx, newBufferedFlusher(w)) + } + return errors.New("json output not supported") + case 
flags.OutputText: + if tr, ok := r.(templateRenderer); ok && template != "" { + return renderUsingTemplate(ctx, tr, w, headerTemplate, template) + } + if tr, ok := r.(textRenderer); ok { + return tr.renderText(ctx, w) + } + if jr, ok := r.(jsonRenderer); ok { + return jr.renderJson(ctx, newBufferedFlusher(w)) + } + return errors.New("no renderer defined") + default: + return fmt.Errorf("invalid output format: %s", outputFormat) + } +} + +type listingInterface interface { + HasNext(context.Context) bool +} + +func Render(ctx context.Context, v any) error { + c := fromContext(ctx) + if _, ok := v.(listingInterface); ok { + panic("use RenderIterator instead") + } + return renderWithTemplate(newRenderer(v), ctx, c.outputFormat, c.out, c.headerTemplate, c.template) +} + +func RenderIterator[T any](ctx context.Context, i listing.Iterator[T]) error { + c := fromContext(ctx) + return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, c.headerTemplate, c.template) +} + +func RenderWithTemplate(ctx context.Context, v any, headerTemplate, template string) error { + c := fromContext(ctx) + if _, ok := v.(listingInterface); ok { + panic("use RenderIteratorWithTemplate instead") + } + return renderWithTemplate(newRenderer(v), ctx, c.outputFormat, c.out, headerTemplate, template) +} + +func RenderIteratorWithTemplate[T any](ctx context.Context, i listing.Iterator[T], headerTemplate, template string) error { + c := fromContext(ctx) + return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, headerTemplate, template) +} + +func RenderJson(ctx context.Context, v any) error { + c := fromContext(ctx) + if _, ok := v.(listingInterface); ok { + panic("use RenderIteratorJson instead") + } + return renderWithTemplate(newRenderer(v), ctx, flags.OutputJSON, c.out, c.headerTemplate, c.template) +} + +func RenderIteratorJson[T any](ctx context.Context, i listing.Iterator[T]) error { + c := fromContext(ctx) + return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, c.headerTemplate, c.template) +} + +func renderUsingTemplate(ctx context.Context, r templateRenderer, w io.Writer, headerTmpl, tmpl string) error { tw := tabwriter.NewWriter(w, 0, 4, 2, ' ', 0) - t, err := template.New("command").Funcs(template.FuncMap{ + base := template.New("command").Funcs(template.FuncMap{ // we render colored output if stdout is TTY, otherwise we render text. // in the future we'll check if we can explicitly check for stderr being // a TTY @@ -116,11 +350,24 @@ func renderTemplate(w io.Writer, tmpl string, v any) error { } return string(out), nil }, - }).Parse(tmpl) + }) + if headerTmpl != "" { + headerT, err := base.Parse(headerTmpl) + if err != nil { + return err + } + err = headerT.Execute(tw, nil) + if err != nil { + return err + } + tw.Write([]byte("\n")) + // Do not flush here. Instead, allow the first 100 resources to determine the initial spacing of the header columns. 
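+	// Note (editor): text/tabwriter computes column widths per flushed section, so keeping the header buffered together with the following rows keeps the header columns aligned with the data.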
+ } + t, err := base.Parse(tmpl) if err != nil { return err } - err = t.Execute(tw, v) + err = r.renderTemplate(ctx, t, tw) if err != nil { return err } diff --git a/libs/cmdio/render_test.go b/libs/cmdio/render_test.go new file mode 100644 index 000000000..6bde446c4 --- /dev/null +++ b/libs/cmdio/render_test.go @@ -0,0 +1,190 @@ +package cmdio + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/service/provisioning" + "github.com/stretchr/testify/assert" +) + +type testCase struct { + name string + v any + outputFormat flags.Output + headerTemplate string + template string + expected string + errMessage string +} + +var dummyWorkspace1 = provisioning.Workspace{ + WorkspaceId: 123, + WorkspaceName: "abc", +} + +var dummyWorkspace2 = provisioning.Workspace{ + WorkspaceId: 456, + WorkspaceName: "def", +} + +type dummyIterator struct { + items []*provisioning.Workspace +} + +func (d *dummyIterator) HasNext(_ context.Context) bool { + return len(d.items) > 0 +} + +func (d *dummyIterator) Next(ctx context.Context) (*provisioning.Workspace, error) { + if !d.HasNext(ctx) { + return nil, errors.New("no more items") + } + item := d.items[0] + d.items = d.items[1:] + return item, nil +} + +func makeWorkspaces(count int) []*provisioning.Workspace { + res := make([]*provisioning.Workspace, 0, count) + next := []*provisioning.Workspace{&dummyWorkspace1, &dummyWorkspace2} + for i := 0; i < count; i++ { + n := next[0] + next = append(next[1:], n) + res = append(res, n) + } + return res +} + +func makeIterator(count int) listing.Iterator[*provisioning.Workspace] { + items := make([]*provisioning.Workspace, 0, count) + items = append(items, makeWorkspaces(count)...) 
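+	// Wrap the prepared workspaces in a dummy iterator so tests can exercise RenderIterator.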
+ return &dummyIterator{ + items: items, + } +} + +func makeBigOutput(count int) string { + res := bytes.Buffer{} + for _, ws := range makeWorkspaces(count) { + res.Write([]byte(fmt.Sprintf("%d %s\n", ws.WorkspaceId, ws.WorkspaceName))) + } + return res.String() +} + +func must[T any](a T, e error) T { + if e != nil { + panic(e) + } + return a +} + +var testCases = []testCase{ + { + name: "Workspace with header and template", + v: dummyWorkspace1, + outputFormat: flags.OutputText, + headerTemplate: "id\tname", + template: "{{.WorkspaceId}}\t{{.WorkspaceName}}", + expected: `id name +123 abc`, + }, + { + name: "Workspace with no header and template", + v: dummyWorkspace1, + outputFormat: flags.OutputText, + template: "{{.WorkspaceId}}\t{{.WorkspaceName}}", + expected: `123 abc`, + }, + { + name: "Workspace with no header and no template", + v: dummyWorkspace1, + outputFormat: flags.OutputText, + expected: `{ + "workspace_id":123, + "workspace_name":"abc" +} +`, + }, + { + name: "Workspace Iterator with header and template", + v: makeIterator(2), + outputFormat: flags.OutputText, + headerTemplate: "id\tname", + template: "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}", + expected: `id name +123 abc +456 def +`, + }, + { + name: "Workspace Iterator with no header and template", + v: makeIterator(2), + outputFormat: flags.OutputText, + template: "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}", + expected: `123 abc +456 def +`, + }, + { + name: "Workspace Iterator with no header and no template", + v: makeIterator(2), + outputFormat: flags.OutputText, + expected: string(must(json.MarshalIndent(makeWorkspaces(2), "", " "))) + "\n", + }, + { + name: "Big Workspace Iterator with template", + v: makeIterator(234), + outputFormat: flags.OutputText, + headerTemplate: "id\tname", + template: "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}", + expected: "id name\n" + makeBigOutput(234), + }, + { + name: "Big Workspace Iterator with no template", + v: makeIterator(234), + outputFormat: flags.OutputText, + expected: string(must(json.MarshalIndent(makeWorkspaces(234), "", " "))) + "\n", + }, + { + name: "io.Reader", + v: strings.NewReader("a test"), + outputFormat: flags.OutputText, + expected: "a test", + }, + { + name: "io.Reader", + v: strings.NewReader("a test"), + outputFormat: flags.OutputJSON, + errMessage: "json output not supported", + }, +} + +func TestRender(t *testing.T) { + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + output := &bytes.Buffer{} + cmdIO := NewIO(c.outputFormat, nil, output, output, c.headerTemplate, c.template) + ctx := InContext(context.Background(), cmdIO) + var err error + if vv, ok := c.v.(listing.Iterator[*provisioning.Workspace]); ok { + err = RenderIterator(ctx, vv) + } else { + err = Render(ctx, c.v) + } + if c.errMessage != "" { + assert.ErrorContains(t, err, c.errMessage) + } else { + assert.NoError(t, err) + assert.Equal(t, c.expected, output.String()) + } + }) + } +} diff --git a/libs/databrickscfg/cfgpickers/clusters_test.go b/libs/databrickscfg/cfgpickers/clusters_test.go index 8afcd6d07..2e62f93a8 100644 --- a/libs/databrickscfg/cfgpickers/clusters_test.go +++ b/libs/databrickscfg/cfgpickers/clusters_test.go @@ -115,7 +115,7 @@ func TestFirstCompatibleCluster(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "...")) + ctx = 
cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) clusterID, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.NoError(t, err) require.Equal(t, "bcd-id", clusterID) @@ -162,7 +162,7 @@ func TestNoCompatibleClusters(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) _, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.Equal(t, ErrNoCompatibleClusters, err) } diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index d495ae895..a07b26f81 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -111,7 +111,7 @@ func TestWorkspaceHost(t *testing.T) { func TestWorkspaceHostNotConfigured(t *testing.T) { ctx := context.Background() - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "template") + cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") ctx = cmdio.InContext(ctx, cmd) tmpDir := t.TempDir() From 1588a14d07f36a2faa1923323dfd503eae42eca1 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 22 Feb 2024 15:52:49 +0100 Subject: [PATCH 051/286] Add correct tag value for models in dev mode (#1230) ## Changes Fixes #922 ## Tests Added regression test case --- bundle/config/mutator/process_target_mode.go | 2 +- bundle/config/mutator/process_target_mode_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 592e3612c..e57509452 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -70,7 +70,7 @@ func transformDevelopmentMode(b *bundle.Bundle) error { for i := range r.Models { r.Models[i].Name = prefix + r.Models[i].Name - r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: ""}) + r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: tagValue}) } for i := range r.Experiments { diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 6d8025803..a5f61284c 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -138,6 +138,7 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Model 1 assert.Equal(t, "[dev lennart] model1", b.Config.Resources.Models["model1"].Name) + assert.Contains(t, b.Config.Resources.Models["model1"].Tags, ml.ModelTag{Key: "dev", Value: "lennart"}) // Model serving endpoint 1 assert.Equal(t, "dev_lennart_servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) From 1b4a774609bfde5841b18bd31ff79b154b362949 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 22 Feb 2024 16:14:06 +0100 Subject: [PATCH 052/286] Only set ComputeID value when `--compute-id` flag provided (#1229) ## Changes Fixes an issue when `compute_id` is defined in the bundle config, correctly replaced in `validate` command but not used in `deploy` command ## Tests Manually --- cmd/bundle/deploy.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/bundle/deploy.go 
b/cmd/bundle/deploy.go index c1f0cdf29..cd3a2a368 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -32,7 +32,9 @@ func newDeployCommand() *cobra.Command { bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) error { b.Config.Bundle.Force = force b.Config.Bundle.Deployment.Lock.Force = forceLock - b.Config.Bundle.ComputeID = computeID + if cmd.Flag("compute-id").Changed { + b.Config.Bundle.ComputeID = computeID + } if cmd.Flag("fail-on-active-runs").Changed { b.Config.Bundle.Deployment.FailOnActiveRuns = failOnActiveRuns From f69b70782db8abdb277321b36d94c43758568130 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 22 Feb 2024 16:17:43 +0100 Subject: [PATCH 053/286] Handle alias types for map keys in toTyped conversion (#1232) ## Changes Handle alias types for map keys in toTyped conversion ## Tests Added an unit test --- libs/dyn/convert/to_typed.go | 3 ++- libs/dyn/convert/to_typed_test.go | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index aeaaa9bea..8b3cf3bb8 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -115,12 +115,13 @@ func toTypedMap(dst reflect.Value, src dyn.Value) error { dst.Set(reflect.MakeMapWithSize(dst.Type(), len(m))) for k, v := range m { kv := reflect.ValueOf(k) + kt := dst.Type().Key() vv := reflect.New(dst.Type().Elem()) err := ToTyped(vv.Interface(), v) if err != nil { return err } - dst.SetMapIndex(kv, vv.Elem()) + dst.SetMapIndex(kv.Convert(kt), vv.Elem()) } return nil case dyn.KindNil: diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index a7c4a6f08..a3c340e81 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -495,3 +495,19 @@ func TestToTypedFloat64FromStringVariableReference(t *testing.T) { require.NoError(t, err) assert.Equal(t, float64(0.0), out) } + +func TestToTypedWithAliasKeyType(t *testing.T) { + type custom string + + var out map[custom]string + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Len(t, out, 2) + assert.Equal(t, "bar", out["foo"]) + assert.Equal(t, "baz", out["bar"]) +} From 1dbc086e5a6d7dc69525bd02942b1281e4516c78 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 23 Feb 2024 11:41:42 +0100 Subject: [PATCH 054/286] Upgrade Terraform provider to 1.37.0 (#1235) ## Changes Upgrade Terraform provider to 1.37.0 Currently we're using 1.36.2 version which uses Go SDK 0.30 which does not have U2M enabled for all clouds. 
Upgrading to 1.37.0 allows TF provider (and thus DABs) to use U2M Fixes #1231 --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../internal/tf/schema/data_source_cluster.go | 2 +- bundle/internal/tf/schema/data_source_job.go | 377 +++++++++++++++++- .../schema/data_source_storage_credential.go | 57 +++ .../schema/data_source_storage_credentials.go | 8 + bundle/internal/tf/schema/data_sources.go | 4 + bundle/internal/tf/schema/resource_cluster.go | 2 +- bundle/internal/tf/schema/resource_file.go | 14 + bundle/internal/tf/schema/resource_job.go | 377 +++++++++++++++++- .../internal/tf/schema/resource_pipeline.go | 2 +- .../schema/resource_vector_search_endpoint.go | 16 + bundle/internal/tf/schema/resource_volume.go | 1 + bundle/internal/tf/schema/resources.go | 4 + bundle/internal/tf/schema/root.go | 2 +- 14 files changed, 855 insertions(+), 13 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_storage_credential.go create mode 100644 bundle/internal/tf/schema/data_source_storage_credentials.go create mode 100644 bundle/internal/tf/schema/resource_file.go create mode 100644 bundle/internal/tf/schema/resource_vector_search_endpoint.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index c79319eda..a41b62257 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.36.2" +const ProviderVersion = "1.37.0" diff --git a/bundle/internal/tf/schema/data_source_cluster.go b/bundle/internal/tf/schema/data_source_cluster.go index d34d63a79..fff66dc93 100644 --- a/bundle/internal/tf/schema/data_source_cluster.go +++ b/bundle/internal/tf/schema/data_source_cluster.go @@ -122,7 +122,7 @@ type DataSourceClusterClusterInfoInitScriptsS3 struct { } type DataSourceClusterClusterInfoInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScriptsWorkspace struct { diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index f9a316d78..6e67b285f 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -21,6 +21,7 @@ type DataSourceJobJobSettingsSettingsDbtTask struct { ProfilesDirectory string `json:"profiles_directory,omitempty"` ProjectDirectory string `json:"project_directory,omitempty"` Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` } @@ -160,7 +161,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 struct { } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace struct { @@ -347,7 +348,7 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 struct { } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace struct { @@ -482,6 +483,7 @@ type DataSourceJobJobSettingsSettingsTaskDbtTask struct { ProfilesDirectory string `json:"profiles_directory,omitempty"` ProjectDirectory string `json:"project_directory,omitempty"` 
Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` } @@ -497,6 +499,371 @@ type DataSourceJobJobSettingsSettingsTaskEmailNotifications struct { OnSuccess []string `json:"on_success,omitempty"` } +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskConditionTask struct { + Left string `json:"left,omitempty"` + Op string `json:"op,omitempty"` + Right string `json:"right,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDbtTask struct { + Catalog string `json:"catalog,omitempty"` + Commands []string `json:"commands"` + ProfilesDirectory string `json:"profiles_directory,omitempty"` + ProjectDirectory string `json:"project_directory,omitempty"` + Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDependsOn struct { + Outcome string `json:"outcome,omitempty"` + TaskKey string `json:"task_key"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications struct { + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []string `json:"on_failure,omitempty"` + OnStart []string `json:"on_start,omitempty"` + OnSuccess []string `json:"on_success,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealthRules struct { + Metric string `json:"metric,omitempty"` + Op string `json:"op,omitempty"` + Value int `json:"value,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealth struct { + Rules []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealthRules `json:"rules,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` + Maven *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` + Pypi *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryPypi `json:"pypi,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAutoscale struct { + MaxWorkers int `json:"max_workers,omitempty"` + MinWorkers int `json:"min_workers,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAwsAttributes struct { + Availability string `json:"availability,omitempty"` + EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeType string `json:"ebs_volume_type,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` + ZoneId string `json:"zone_id,omitempty"` +} + +type 
DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAzureAttributes struct { + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConfDbfs struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConfS3 struct { + CannedAcl string `json:"canned_acl,omitempty"` + Destination string `json:"destination"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + KmsKey string `json:"kms_key,omitempty"` + Region string `json:"region,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConf struct { + Dbfs *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConfDbfs `json:"dbfs,omitempty"` + S3 *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConfS3 `json:"s3,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo struct { + MountOptions string `json:"mount_options,omitempty"` + ServerAddress string `json:"server_address"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterMountInfo struct { + LocalMountDirPath string `json:"local_mount_dir_path"` + RemoteMountDirPath string `json:"remote_mount_dir_path,omitempty"` + NetworkFilesystemInfo *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo `json:"network_filesystem_info,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterDockerImageBasicAuth struct { + Password string `json:"password"` + Username string `json:"username"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterDockerImage struct { + Url string `json:"url"` + BasicAuth *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterDockerImageBasicAuth `json:"basic_auth,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterGcpAttributes struct { + Availability string `json:"availability,omitempty"` + BootDiskSize int `json:"boot_disk_size,omitempty"` + GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` + UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` + ZoneId string `json:"zone_id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsAbfss struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsDbfs struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsFile struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsGcs struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsS3 struct { + CannedAcl string `json:"canned_acl,omitempty"` + Destination string `json:"destination"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + KmsKey string 
`json:"kms_key,omitempty"` + Region string `json:"region,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsVolumes struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsWorkspace struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScripts struct { + Abfss *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"` + Dbfs *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"` + File *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsFile `json:"file,omitempty"` + Gcs *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"` + S3 *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsVolumes `json:"volumes,omitempty"` + Workspace *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterWorkloadTypeClients struct { + Jobs bool `json:"jobs,omitempty"` + Notebooks bool `json:"notebooks,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterWorkloadType struct { + Clients *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterWorkloadTypeClients `json:"clients,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewCluster struct { + ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + ClusterId string `json:"cluster_id,omitempty"` + ClusterName string `json:"cluster_name,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + DataSecurityMode string `json:"data_security_mode,omitempty"` + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + IdempotencyToken string `json:"idempotency_token,omitempty"` + InstancePoolId string `json:"instance_pool_id,omitempty"` + NodeTypeId string `json:"node_type_id,omitempty"` + NumWorkers int `json:"num_workers"` + PolicyId string `json:"policy_id,omitempty"` + RuntimeEngine string `json:"runtime_engine,omitempty"` + SingleUserName string `json:"single_user_name,omitempty"` + SparkConf map[string]string `json:"spark_conf,omitempty"` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + SparkVersion string `json:"spark_version"` + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + Autoscale *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAutoscale `json:"autoscale,omitempty"` + AwsAttributes *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` + AzureAttributes *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` + ClusterLogConf *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConf `json:"cluster_log_conf,omitempty"` + ClusterMountInfo 
[]DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterMountInfo `json:"cluster_mount_info,omitempty"` + DockerImage *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterDockerImage `json:"docker_image,omitempty"` + GcpAttributes *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` + InitScripts []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScripts `json:"init_scripts,omitempty"` + WorkloadType *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterWorkloadType `json:"workload_type,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotebookTask struct { + BaseParameters map[string]string `json:"base_parameters,omitempty"` + NotebookPath string `json:"notebook_path"` + Source string `json:"source,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotificationSettings struct { + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskPipelineTask struct { + FullRefresh bool `json:"full_refresh,omitempty"` + PipelineId string `json:"pipeline_id"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskPythonWheelTask struct { + EntryPoint string `json:"entry_point,omitempty"` + NamedParameters map[string]string `json:"named_parameters,omitempty"` + PackageName string `json:"package_name,omitempty"` + Parameters []string `json:"parameters,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskRunJobTask struct { + JobId int `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkJarTask struct { + JarUri string `json:"jar_uri,omitempty"` + MainClassName string `json:"main_class_name,omitempty"` + Parameters []string `json:"parameters,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkPythonTask struct { + Parameters []string `json:"parameters,omitempty"` + PythonFile string `json:"python_file"` + Source string `json:"source,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkSubmitTask struct { + Parameters []string `json:"parameters,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlertSubscriptions struct { + DestinationId string `json:"destination_id,omitempty"` + UserName string `json:"user_name,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert struct { + AlertId string `json:"alert_id"` + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + Subscriptions []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlertSubscriptions `json:"subscriptions,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboardSubscriptions struct { + DestinationId string `json:"destination_id,omitempty"` + UserName string `json:"user_name,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboard struct { + CustomSubject string `json:"custom_subject,omitempty"` + DashboardId string `json:"dashboard_id"` + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + Subscriptions []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboardSubscriptions `json:"subscriptions,omitempty"` +} + +type 
DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskFile struct { + Path string `json:"path"` + Source string `json:"source,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQuery struct { + QueryId string `json:"query_id"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTask struct { + Parameters map[string]string `json:"parameters,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` + Alert *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"` + Dashboard *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"` + File *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"` + Query *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQuery `json:"query,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { + Id string `json:"id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure struct { + Id string `json:"id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart struct { + Id string `json:"id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { + Id string `json:"id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications struct { + OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` + OnStart []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnSuccess []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTask struct { + ComputeKey string `json:"compute_key,omitempty"` + Description string `json:"description,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key,omitempty"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealth `json:"health,omitempty"` + Library []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibrary `json:"library,omitempty"` + NewCluster *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotebookTask 
`json:"notebook_task,omitempty"` + NotificationSettings *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTask struct { + Concurrency int `json:"concurrency,omitempty"` + Inputs string `json:"inputs"` + Task *DataSourceJobJobSettingsSettingsTaskForEachTaskTask `json:"task,omitempty"` +} + type DataSourceJobJobSettingsSettingsTaskHealthRules struct { Metric string `json:"metric,omitempty"` Op string `json:"op,omitempty"` @@ -630,7 +997,7 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 struct { } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace struct { @@ -758,7 +1125,8 @@ type DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard struct { } type DataSourceJobJobSettingsSettingsTaskSqlTaskFile struct { - Path string `json:"path"` + Path string `json:"path"` + Source string `json:"source,omitempty"` } type DataSourceJobJobSettingsSettingsTaskSqlTaskQuery struct { @@ -812,6 +1180,7 @@ type DataSourceJobJobSettingsSettingsTask struct { DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"` DependsOn []DataSourceJobJobSettingsSettingsTaskDependsOn `json:"depends_on,omitempty"` EmailNotifications *DataSourceJobJobSettingsSettingsTaskEmailNotifications `json:"email_notifications,omitempty"` + ForEachTask *DataSourceJobJobSettingsSettingsTaskForEachTask `json:"for_each_task,omitempty"` Health *DataSourceJobJobSettingsSettingsTaskHealth `json:"health,omitempty"` Library []DataSourceJobJobSettingsSettingsTaskLibrary `json:"library,omitempty"` NewCluster *DataSourceJobJobSettingsSettingsTaskNewCluster `json:"new_cluster,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_storage_credential.go b/bundle/internal/tf/schema/data_source_storage_credential.go new file mode 100644 index 000000000..c7045d445 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_storage_credential.go @@ -0,0 +1,57 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceStorageCredentialStorageCredentialInfoAwsIamRole struct { + ExternalId string `json:"external_id,omitempty"` + RoleArn string `json:"role_arn"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` +} + +type DataSourceStorageCredentialStorageCredentialInfoAzureManagedIdentity struct { + AccessConnectorId string `json:"access_connector_id"` + CredentialId string `json:"credential_id,omitempty"` + ManagedIdentityId string `json:"managed_identity_id,omitempty"` +} + +type DataSourceStorageCredentialStorageCredentialInfoAzureServicePrincipal struct { + ApplicationId string `json:"application_id"` + ClientSecret string `json:"client_secret"` + DirectoryId string `json:"directory_id"` +} + +type DataSourceStorageCredentialStorageCredentialInfoCloudflareApiToken struct { + AccessKeyId string `json:"access_key_id"` + AccountId string `json:"account_id"` + SecretAccessKey string `json:"secret_access_key"` +} + +type DataSourceStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccount struct { + CredentialId string `json:"credential_id,omitempty"` + Email string `json:"email,omitempty"` +} + +type DataSourceStorageCredentialStorageCredentialInfo struct { + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"` + AwsIamRole *DataSourceStorageCredentialStorageCredentialInfoAwsIamRole `json:"aws_iam_role,omitempty"` + AzureManagedIdentity *DataSourceStorageCredentialStorageCredentialInfoAzureManagedIdentity `json:"azure_managed_identity,omitempty"` + AzureServicePrincipal *DataSourceStorageCredentialStorageCredentialInfoAzureServicePrincipal `json:"azure_service_principal,omitempty"` + CloudflareApiToken *DataSourceStorageCredentialStorageCredentialInfoCloudflareApiToken `json:"cloudflare_api_token,omitempty"` + DatabricksGcpServiceAccount *DataSourceStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` +} + +type DataSourceStorageCredential struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + StorageCredentialInfo *DataSourceStorageCredentialStorageCredentialInfo `json:"storage_credential_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_storage_credentials.go b/bundle/internal/tf/schema/data_source_storage_credentials.go new file mode 100644 index 000000000..153def357 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_storage_credentials.go @@ -0,0 +1,8 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceStorageCredentials struct { + Id string `json:"id,omitempty"` + Names []string `json:"names,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index a88fa2e2e..698cbec93 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -39,6 +39,8 @@ type DataSources struct { SparkVersion map[string]any `json:"databricks_spark_version,omitempty"` SqlWarehouse map[string]any `json:"databricks_sql_warehouse,omitempty"` SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"` + StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` + StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"` Tables map[string]any `json:"databricks_tables,omitempty"` User map[string]any `json:"databricks_user,omitempty"` Views map[string]any `json:"databricks_views,omitempty"` @@ -84,6 +86,8 @@ func NewDataSources() *DataSources { SparkVersion: make(map[string]any), SqlWarehouse: make(map[string]any), SqlWarehouses: make(map[string]any), + StorageCredential: make(map[string]any), + StorageCredentials: make(map[string]any), Tables: make(map[string]any), User: make(map[string]any), Views: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 1a73b35a4..111efe8d5 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -90,7 +90,7 @@ type ResourceClusterInitScriptsDbfs struct { } type ResourceClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScriptsGcs struct { diff --git a/bundle/internal/tf/schema/resource_file.go b/bundle/internal/tf/schema/resource_file.go new file mode 100644 index 000000000..40a307c9b --- /dev/null +++ b/bundle/internal/tf/schema/resource_file.go @@ -0,0 +1,14 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceFile struct { + ContentBase64 string `json:"content_base64,omitempty"` + FileSize int `json:"file_size,omitempty"` + Id string `json:"id,omitempty"` + Md5 string `json:"md5,omitempty"` + ModificationTime string `json:"modification_time,omitempty"` + Path string `json:"path"` + RemoteFileModified bool `json:"remote_file_modified,omitempty"` + Source string `json:"source,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 96c0c2970..f8d08aefa 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -21,6 +21,7 @@ type ResourceJobDbtTask struct { ProfilesDirectory string `json:"profiles_directory,omitempty"` ProjectDirectory string `json:"project_directory,omitempty"` Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` } @@ -160,7 +161,7 @@ type ResourceJobJobClusterNewClusterInitScriptsS3 struct { } type ResourceJobJobClusterNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScriptsWorkspace struct { @@ -347,7 +348,7 @@ type ResourceJobNewClusterInitScriptsS3 struct { } type ResourceJobNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScriptsWorkspace struct { @@ -482,6 +483,7 @@ type ResourceJobTaskDbtTask struct { ProfilesDirectory string `json:"profiles_directory,omitempty"` ProjectDirectory string `json:"project_directory,omitempty"` Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` } @@ -497,6 +499,371 @@ type ResourceJobTaskEmailNotifications struct { OnSuccess []string `json:"on_success,omitempty"` } +type ResourceJobTaskForEachTaskTaskConditionTask struct { + Left string `json:"left,omitempty"` + Op string `json:"op,omitempty"` + Right string `json:"right,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskDbtTask struct { + Catalog string `json:"catalog,omitempty"` + Commands []string `json:"commands"` + ProfilesDirectory string `json:"profiles_directory,omitempty"` + ProjectDirectory string `json:"project_directory,omitempty"` + Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskDependsOn struct { + Outcome string `json:"outcome,omitempty"` + TaskKey string `json:"task_key"` +} + +type ResourceJobTaskForEachTaskTaskEmailNotifications struct { + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []string `json:"on_failure,omitempty"` + OnStart []string `json:"on_start,omitempty"` + OnSuccess []string `json:"on_success,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskHealthRules struct { + Metric string `json:"metric,omitempty"` + Op string `json:"op,omitempty"` + Value int `json:"value,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskHealth struct { + Rules []ResourceJobTaskForEachTaskTaskHealthRules `json:"rules,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskLibraryMaven struct { + Coordinates string 
`json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskForEachTaskTaskLibraryPypi `json:"pypi,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterAutoscale struct { + MaxWorkers int `json:"max_workers,omitempty"` + MinWorkers int `json:"min_workers,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes struct { + Availability string `json:"availability,omitempty"` + EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeType string `json:"ebs_volume_type,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` + ZoneId string `json:"zone_id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes struct { + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfDbfs struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfS3 struct { + CannedAcl string `json:"canned_acl,omitempty"` + Destination string `json:"destination"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + KmsKey string `json:"kms_key,omitempty"` + Region string `json:"region,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConf struct { + Dbfs *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfDbfs `json:"dbfs,omitempty"` + S3 *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfS3 `json:"s3,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo struct { + MountOptions string `json:"mount_options,omitempty"` + ServerAddress string `json:"server_address"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfo struct { + LocalMountDirPath string `json:"local_mount_dir_path"` + RemoteMountDirPath string `json:"remote_mount_dir_path,omitempty"` + NetworkFilesystemInfo *ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo `json:"network_filesystem_info,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterDockerImageBasicAuth struct { + Password string `json:"password"` + Username string `json:"username"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterDockerImage struct { + Url string `json:"url"` + BasicAuth *ResourceJobTaskForEachTaskTaskNewClusterDockerImageBasicAuth `json:"basic_auth,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterGcpAttributes struct { + Availability string `json:"availability,omitempty"` + BootDiskSize int `json:"boot_disk_size,omitempty"` + GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int 
`json:"local_ssd_count,omitempty"` + UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` + ZoneId string `json:"zone_id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsAbfss struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsDbfs struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsFile struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsGcs struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsS3 struct { + CannedAcl string `json:"canned_acl,omitempty"` + Destination string `json:"destination"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + KmsKey string `json:"kms_key,omitempty"` + Region string `json:"region,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsVolumes struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsWorkspace struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScripts struct { + Abfss *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"` + Dbfs *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"` + File *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsFile `json:"file,omitempty"` + Gcs *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"` + S3 *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsVolumes `json:"volumes,omitempty"` + Workspace *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterWorkloadTypeClients struct { + Jobs bool `json:"jobs,omitempty"` + Notebooks bool `json:"notebooks,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterWorkloadType struct { + Clients *ResourceJobTaskForEachTaskTaskNewClusterWorkloadTypeClients `json:"clients,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewCluster struct { + ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + ClusterId string `json:"cluster_id,omitempty"` + ClusterName string `json:"cluster_name,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + DataSecurityMode string `json:"data_security_mode,omitempty"` + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + IdempotencyToken string `json:"idempotency_token,omitempty"` + InstancePoolId string `json:"instance_pool_id,omitempty"` + NodeTypeId string `json:"node_type_id,omitempty"` + NumWorkers int `json:"num_workers"` + PolicyId string `json:"policy_id,omitempty"` + RuntimeEngine string `json:"runtime_engine,omitempty"` + SingleUserName string `json:"single_user_name,omitempty"` + SparkConf map[string]string `json:"spark_conf,omitempty"` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + 
SparkVersion string `json:"spark_version"` + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + Autoscale *ResourceJobTaskForEachTaskTaskNewClusterAutoscale `json:"autoscale,omitempty"` + AwsAttributes *ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` + AzureAttributes *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` + ClusterLogConf *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConf `json:"cluster_log_conf,omitempty"` + ClusterMountInfo []ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfo `json:"cluster_mount_info,omitempty"` + DockerImage *ResourceJobTaskForEachTaskTaskNewClusterDockerImage `json:"docker_image,omitempty"` + GcpAttributes *ResourceJobTaskForEachTaskTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` + InitScripts []ResourceJobTaskForEachTaskTaskNewClusterInitScripts `json:"init_scripts,omitempty"` + WorkloadType *ResourceJobTaskForEachTaskTaskNewClusterWorkloadType `json:"workload_type,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNotebookTask struct { + BaseParameters map[string]string `json:"base_parameters,omitempty"` + NotebookPath string `json:"notebook_path"` + Source string `json:"source,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNotificationSettings struct { + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskPipelineTask struct { + FullRefresh bool `json:"full_refresh,omitempty"` + PipelineId string `json:"pipeline_id"` +} + +type ResourceJobTaskForEachTaskTaskPythonWheelTask struct { + EntryPoint string `json:"entry_point,omitempty"` + NamedParameters map[string]string `json:"named_parameters,omitempty"` + PackageName string `json:"package_name,omitempty"` + Parameters []string `json:"parameters,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskRunJobTask struct { + JobId int `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSparkJarTask struct { + JarUri string `json:"jar_uri,omitempty"` + MainClassName string `json:"main_class_name,omitempty"` + Parameters []string `json:"parameters,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSparkPythonTask struct { + Parameters []string `json:"parameters,omitempty"` + PythonFile string `json:"python_file"` + Source string `json:"source,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSparkSubmitTask struct { + Parameters []string `json:"parameters,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskAlertSubscriptions struct { + DestinationId string `json:"destination_id,omitempty"` + UserName string `json:"user_name,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskAlert struct { + AlertId string `json:"alert_id"` + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + Subscriptions []ResourceJobTaskForEachTaskTaskSqlTaskAlertSubscriptions `json:"subscriptions,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskDashboardSubscriptions struct { + DestinationId string `json:"destination_id,omitempty"` + UserName string `json:"user_name,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskDashboard struct { + CustomSubject string `json:"custom_subject,omitempty"` + DashboardId string `json:"dashboard_id"` + PauseSubscriptions bool 
`json:"pause_subscriptions,omitempty"` + Subscriptions []ResourceJobTaskForEachTaskTaskSqlTaskDashboardSubscriptions `json:"subscriptions,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskFile struct { + Path string `json:"path"` + Source string `json:"source,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskQuery struct { + QueryId string `json:"query_id"` +} + +type ResourceJobTaskForEachTaskTaskSqlTask struct { + Parameters map[string]string `json:"parameters,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` + Alert *ResourceJobTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"` + Dashboard *ResourceJobTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"` + File *ResourceJobTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"` + Query *ResourceJobTaskForEachTaskTaskSqlTaskQuery `json:"query,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { + Id string `json:"id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnFailure struct { + Id string `json:"id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart struct { + Id string `json:"id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { + Id string `json:"id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { + OnDurationWarningThresholdExceeded []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` + OnStart []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnSuccess []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` +} + +type ResourceJobTaskForEachTaskTask struct { + ComputeKey string `json:"compute_key,omitempty"` + Description string `json:"description,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key,omitempty"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask 
*ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` +} + +type ResourceJobTaskForEachTask struct { + Concurrency int `json:"concurrency,omitempty"` + Inputs string `json:"inputs"` + Task *ResourceJobTaskForEachTaskTask `json:"task,omitempty"` +} + type ResourceJobTaskHealthRules struct { Metric string `json:"metric,omitempty"` Op string `json:"op,omitempty"` @@ -630,7 +997,7 @@ type ResourceJobTaskNewClusterInitScriptsS3 struct { } type ResourceJobTaskNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScriptsWorkspace struct { @@ -758,7 +1125,8 @@ type ResourceJobTaskSqlTaskDashboard struct { } type ResourceJobTaskSqlTaskFile struct { - Path string `json:"path"` + Path string `json:"path"` + Source string `json:"source,omitempty"` } type ResourceJobTaskSqlTaskQuery struct { @@ -812,6 +1180,7 @@ type ResourceJobTask struct { DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` + ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` Health *ResourceJobTaskHealth `json:"health,omitempty"` Library []ResourceJobTaskLibrary `json:"library,omitempty"` NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 8737985c9..3cad9ac41 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -78,7 +78,7 @@ type ResourcePipelineClusterInitScriptsS3 struct { } type ResourcePipelineClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScriptsWorkspace struct { diff --git a/bundle/internal/tf/schema/resource_vector_search_endpoint.go b/bundle/internal/tf/schema/resource_vector_search_endpoint.go new file mode 100644 index 000000000..392c78611 --- /dev/null +++ b/bundle/internal/tf/schema/resource_vector_search_endpoint.go @@ -0,0 +1,16 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceVectorSearchEndpoint struct { + CreationTimestamp int `json:"creation_timestamp,omitempty"` + Creator string `json:"creator,omitempty"` + EndpointId string `json:"endpoint_id,omitempty"` + EndpointStatus []any `json:"endpoint_status,omitempty"` + EndpointType string `json:"endpoint_type"` + Id string `json:"id,omitempty"` + LastUpdatedTimestamp int `json:"last_updated_timestamp,omitempty"` + LastUpdatedUser string `json:"last_updated_user,omitempty"` + Name string `json:"name"` + NumIndexes int `json:"num_indexes,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_volume.go b/bundle/internal/tf/schema/resource_volume.go index 77d499a69..4a82d8e85 100644 --- a/bundle/internal/tf/schema/resource_volume.go +++ b/bundle/internal/tf/schema/resource_volume.go @@ -10,5 +10,6 @@ type ResourceVolume struct { Owner string `json:"owner,omitempty"` SchemaName string `json:"schema_name"` StorageLocation string `json:"storage_location,omitempty"` + VolumePath string `json:"volume_path,omitempty"` VolumeType string `json:"volume_type"` } diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 57f11d4b4..4cc81e7e7 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -19,6 +19,7 @@ type Resources struct { Directory map[string]any `json:"databricks_directory,omitempty"` Entitlements map[string]any `json:"databricks_entitlements,omitempty"` ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + File map[string]any `json:"databricks_file,omitempty"` GitCredential map[string]any `json:"databricks_git_credential,omitempty"` GlobalInitScript map[string]any `json:"databricks_global_init_script,omitempty"` Grant map[string]any `json:"databricks_grant,omitempty"` @@ -82,6 +83,7 @@ type Resources struct { User map[string]any `json:"databricks_user,omitempty"` UserInstanceProfile map[string]any `json:"databricks_user_instance_profile,omitempty"` UserRole map[string]any `json:"databricks_user_role,omitempty"` + VectorSearchEndpoint map[string]any `json:"databricks_vector_search_endpoint,omitempty"` Volume map[string]any `json:"databricks_volume,omitempty"` WorkspaceConf map[string]any `json:"databricks_workspace_conf,omitempty"` WorkspaceFile map[string]any `json:"databricks_workspace_file,omitempty"` @@ -105,6 +107,7 @@ func NewResources() *Resources { Directory: make(map[string]any), Entitlements: make(map[string]any), ExternalLocation: make(map[string]any), + File: make(map[string]any), GitCredential: make(map[string]any), GlobalInitScript: make(map[string]any), Grant: make(map[string]any), @@ -168,6 +171,7 @@ func NewResources() *Resources { User: make(map[string]any), UserInstanceProfile: make(map[string]any), UserRole: make(map[string]any), + VectorSearchEndpoint: make(map[string]any), Volume: make(map[string]any), WorkspaceConf: make(map[string]any), WorkspaceFile: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 963ae1460..f0253c285 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -25,7 +25,7 @@ func NewRoot() *Root { "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ "source": "databricks/databricks", - "version": "1.36.2", + "version": "1.37.0", }, }, }, From fb8f415e29efcdc9156a6b4d2b6522f6d0c25383 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 23 Feb 2024 14:57:37 +0100 Subject: [PATCH 055/286] Release 
v0.214.1 (#1236) CLI: * Improved error message when no .databrickscfg ([#1223](https://github.com/databricks/cli/pull/1223)). * Use Go SDK Iterators when listing resources with the CLI ([#1202](https://github.com/databricks/cli/pull/1202)). Bundles: * Only set ComputeID value when `--compute-id` flag provided ([#1229](https://github.com/databricks/cli/pull/1229)). * Add correct tag value for models in dev mode ([#1230](https://github.com/databricks/cli/pull/1230)). * Upgrade Terraform provider to 1.37.0 ([#1235](https://github.com/databricks/cli/pull/1235)). Internal: * Fix CLI nightlies on our UC workspaces ([#1225](https://github.com/databricks/cli/pull/1225)). * Handle alias types for map keys in toTyped conversion ([#1232](https://github.com/databricks/cli/pull/1232)). --- CHANGELOG.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88dbc71bb..72a6608da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,22 @@ # Version changelog +## 0.214.1 + +CLI: + * Improved error message when no .databrickscfg ([#1223](https://github.com/databricks/cli/pull/1223)). + * Use Go SDK Iterators when listing resources with the CLI ([#1202](https://github.com/databricks/cli/pull/1202)). + +Bundles: + * Only set ComputeID value when `--compute-id` flag provided ([#1229](https://github.com/databricks/cli/pull/1229)). + * Add correct tag value for models in dev mode ([#1230](https://github.com/databricks/cli/pull/1230)). + * Upgrade Terraform provider to 1.37.0 ([#1235](https://github.com/databricks/cli/pull/1235)). + +Internal: + * Fix CLI nightlies on our UC workspaces ([#1225](https://github.com/databricks/cli/pull/1225)). + * Handle alias types for map keys in toTyped conversion ([#1232](https://github.com/databricks/cli/pull/1232)). 
+ + + ## 0.214.0 CLI: From 0839e6f66a344cb2bc00463b5b63d521bf2e1263 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 26 Feb 2024 11:08:03 +0100 Subject: [PATCH 056/286] Added test to verify scripts.Execute mutator works correctly (#1237) ## Changes Follow up to https://github.com/databricks/cli/pull/1232 --- bundle/scripts/scripts_test.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/bundle/scripts/scripts_test.go b/bundle/scripts/scripts_test.go index a8835b599..bc3202e06 100644 --- a/bundle/scripts/scripts_test.go +++ b/bundle/scripts/scripts_test.go @@ -34,3 +34,18 @@ func TestExecutesHook(t *testing.T) { require.NoError(t, err) require.Equal(t, "Hello", strings.TrimSpace(line)) } + +func TestExecuteMutator(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Experimental: &config.Experimental{ + Scripts: map[config.ScriptHook]config.Command{ + config.ScriptPreBuild: "echo 'Hello'", + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, Execute(config.ScriptPreInit)) + require.NoError(t, err) +} From d12f88e24dade5b7220c11b07fd44880fc98017e Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Fri, 1 Mar 2024 09:25:12 +0100 Subject: [PATCH 057/286] Fix summary command when internal terraform config doesn't exist (#1242) Check if `bundle.tf.json` doesn't exist and create it before executing `terraform init` (inside `terraform.Load`) Fixes a problem when during `terraform.Load` it fails with: ``` Error: Failed to load plugin schemas Error while loading schemas for plugin components: Failed to obtain provider schema: Could not load the schema for provider registry.terraform.io/databricks/databricks: failed to instantiate provider "registry.terraform.io/databricks/databricks" to obtain schema: unavailable provider "registry.terraform.io/databricks/databricks".. 
``` --- bundle/deploy/terraform/pkg.go | 1 + bundle/deploy/terraform/write.go | 2 +- cmd/bundle/summary.go | 11 ++++++++--- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/bundle/deploy/terraform/pkg.go b/bundle/deploy/terraform/pkg.go index 5e3807be7..2d9293d1b 100644 --- a/bundle/deploy/terraform/pkg.go +++ b/bundle/deploy/terraform/pkg.go @@ -1,3 +1,4 @@ package terraform const TerraformStateFileName = "terraform.tfstate" +const TerraformConfigFileName = "bundle.tf.json" diff --git a/bundle/deploy/terraform/write.go b/bundle/deploy/terraform/write.go index 3ec1b5812..e688f6a61 100644 --- a/bundle/deploy/terraform/write.go +++ b/bundle/deploy/terraform/write.go @@ -32,7 +32,7 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { return err } - f, err := os.Create(filepath.Join(dir, "bundle.tf.json")) + f, err := os.Create(filepath.Join(dir, TerraformConfigFileName)) if err != nil { return err } diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index 596f7d3d8..44c79f5d7 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -42,11 +42,16 @@ func newSummaryCommand() *cobra.Command { if err != nil { return err } - _, err = os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName)) - noCache := errors.Is(err, os.ErrNotExist) + _, stateFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName)) + _, configFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformConfigFileName)) + noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist) if forcePull || noCache { - err = bundle.Apply(cmd.Context(), b, terraform.StatePull()) + err = bundle.Apply(cmd.Context(), b, bundle.Seq( + terraform.StatePull(), + terraform.Interpolate(), + terraform.Write(), + )) if err != nil { return err } From e1407038d37557e3100a1c1b893703653df77b01 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 1 Mar 2024 16:50:20 +0100 Subject: [PATCH 058/286] Configure cobra.NoArgs for bundle commands where applicable (#1250) ## Changes Return an error if unused arguments are passed to these commands. ## Tests n/a --- cmd/bundle/deploy.go | 1 + cmd/bundle/destroy.go | 6 +++--- cmd/bundle/run.go | 5 ++--- cmd/bundle/schema.go | 1 + cmd/bundle/summary.go | 6 +++--- cmd/bundle/validate.go | 6 +++--- 6 files changed, 13 insertions(+), 12 deletions(-) diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index cd3a2a368..60426ecad 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -13,6 +13,7 @@ func newDeployCommand() *cobra.Command { cmd := &cobra.Command{ Use: "deploy", Short: "Deploy bundle", + Args: cobra.NoArgs, PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index 958681f06..b27161f98 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -16,9 +16,9 @@ import ( func newDestroyCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "destroy", - Short: "Destroy deployed bundle resources", - + Use: "destroy", + Short: "Destroy deployed bundle resources", + Args: cobra.NoArgs, PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 54aa6ae75..8814bee0b 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -17,9 +17,8 @@ import ( func newRunCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "run [flags] KEY", - Short: "Run a resource (e.g. a job or a pipeline)", - + Use: "run [flags] KEY", + Short: "Run a resource (e.g. 
a job or a pipeline)", Args: cobra.MaximumNArgs(1), PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/schema.go b/cmd/bundle/schema.go index f516695c7..eb0c1fc9e 100644 --- a/cmd/bundle/schema.go +++ b/cmd/bundle/schema.go @@ -13,6 +13,7 @@ func newSchemaCommand() *cobra.Command { cmd := &cobra.Command{ Use: "schema", Short: "Generate JSON Schema for bundle configuration", + Args: cobra.NoArgs, } cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index 44c79f5d7..8b475661f 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -18,9 +18,9 @@ import ( func newSummaryCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "summary", - Short: "Describe the bundle resources and their deployment states", - + Use: "summary", + Short: "Describe the bundle resources and their deployment states", + Args: cobra.NoArgs, PreRunE: utils.ConfigureBundleWithVariables, // This command is currently intended for the Databricks VSCode extension only diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index f235e097b..9a5bf1e9a 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -12,9 +12,9 @@ import ( func newValidateCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "validate", - Short: "Validate configuration", - + Use: "validate", + Short: "Validate configuration", + Args: cobra.NoArgs, PreRunE: utils.ConfigureBundleWithVariables, } From 58e1db58b17f24779dd7c7f69810bb2fdf5a3828 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 1 Mar 2024 16:59:47 +0100 Subject: [PATCH 059/286] Fixed building Python artifacts on Windows with WSL (#1249) ## Changes Fixed building Python artifacts on Windows with WSL Fixes #1243 --- libs/exec/shell_bash.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/libs/exec/shell_bash.go b/libs/exec/shell_bash.go index bb8c6c514..9f6b508f4 100644 --- a/libs/exec/shell_bash.go +++ b/libs/exec/shell_bash.go @@ -34,8 +34,12 @@ func newBashShell() (shell, error) { return nil, nil } + // Convert to lowercase for case-insensitive comparison + // Some systems may return some parts of the path in uppercase. + outLower := strings.ToLower(out) // Skipping WSL bash if found one - if strings.Contains(out, `\Windows\System32\bash.exe`) || strings.Contains(out, `\Microsoft\WindowsApps\bash.exe`) { + if strings.Contains(outLower, `\windows\system32\bash.exe`) || + strings.Contains(outLower, `\microsoft\windowsapps\bash.exe`) { return nil, nil } From 04827688fb2fb51a191a8d8a1dbb0cdb29b70b90 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 4 Mar 2024 09:38:32 +0100 Subject: [PATCH 060/286] Add `--validate-only` flag to run validate-only pipeline update (#1251) ## Changes This flag starts a "validation-only" update. ## Tests Unit and manual confirmation it does what it should. --- bundle/run/pipeline_options.go | 8 ++++++++ bundle/run/pipeline_options_test.go | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/bundle/run/pipeline_options.go b/bundle/run/pipeline_options.go index 4917f9db3..6c8c1e8c7 100644 --- a/bundle/run/pipeline_options.go +++ b/bundle/run/pipeline_options.go @@ -22,6 +22,9 @@ type PipelineOptions struct { // List of tables to reset and recompute. FullRefresh []string + + // Perform an update to validate graph correctness. 
+ ValidateOnly bool } func (o *PipelineOptions) Define(fs *flag.FlagSet) { @@ -29,6 +32,7 @@ func (o *PipelineOptions) Define(fs *flag.FlagSet) { fs.StringSliceVar(&o.Refresh, "refresh", nil, "List of tables to update.") fs.BoolVar(&o.FullRefreshAll, "full-refresh-all", false, "Perform a full graph reset and recompute.") fs.StringSliceVar(&o.FullRefresh, "full-refresh", nil, "List of tables to reset and recompute.") + fs.BoolVar(&o.ValidateOnly, "validate-only", false, "Perform an update to validate graph correctness.") } // Validate returns if the combination of options is valid. @@ -46,6 +50,9 @@ func (o *PipelineOptions) Validate(pipeline *resources.Pipeline) error { if len(o.FullRefresh) > 0 { set = append(set, "--full-refresh") } + if o.ValidateOnly { + set = append(set, "--validate-only") + } if len(set) > 1 { return fmt.Errorf("pipeline run arguments are mutually exclusive (got %s)", strings.Join(set, ", ")) } @@ -63,6 +70,7 @@ func (o *PipelineOptions) toPayload(pipeline *resources.Pipeline, pipelineID str RefreshSelection: o.Refresh, FullRefresh: o.FullRefreshAll, FullRefreshSelection: o.FullRefresh, + ValidateOnly: o.ValidateOnly, } return payload, nil } diff --git a/bundle/run/pipeline_options_test.go b/bundle/run/pipeline_options_test.go index 3048a4d8c..b42de8c07 100644 --- a/bundle/run/pipeline_options_test.go +++ b/bundle/run/pipeline_options_test.go @@ -43,12 +43,20 @@ func TestPipelineOptionsFullRefresh(t *testing.T) { assert.Equal(t, []string{"arg1", "arg2", "arg3"}, opts.FullRefresh) } +func TestPipelineOptionsValidateOnly(t *testing.T) { + fs, opts := setupPipelineOptions(t) + err := fs.Parse([]string{`--validate-only`}) + require.NoError(t, err) + assert.True(t, opts.ValidateOnly) +} + func TestPipelineOptionsValidateSuccessWithSingleOption(t *testing.T) { args := []string{ `--refresh-all`, `--refresh=arg1,arg2,arg3`, `--full-refresh-all`, `--full-refresh=arg1,arg2,arg3`, + `--validate-only`, } for _, arg := range args { fs, opts := setupPipelineOptions(t) @@ -65,6 +73,7 @@ func TestPipelineOptionsValidateFailureWithMultipleOptions(t *testing.T) { `--refresh=arg1,arg2,arg3`, `--full-refresh-all`, `--full-refresh=arg1,arg2,arg3`, + `--validate-only`, } for i := range args { for j := range args { From 982f1b5398e4feb9f7cea0c5e31b33964be14847 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 12:24:43 +0100 Subject: [PATCH 061/286] Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 (#1252) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.4 to 1.9.0.
**Release notes**

*Sourced from github.com/stretchr/testify's releases.*

**v1.9.0**

**What's Changed**

... (truncated)

**Commits**

- bb548d0 Merge pull request #1552 from stretchr/dependabot/go_modules/github.com/stret...
- 814075f build(deps): bump github.com/stretchr/objx from 0.5.1 to 0.5.2
- e045612 Merge pull request #1339 from bogdandrutu/uintptr
- 5b6926d Merge pull request #1385 from hslatman/not-implements
- 9f97d67 Merge pull request #1550 from stretchr/release-notes
- bcb0d3f Include the auto-release notes in releases
- fb770f8 Merge pull request #1247 from ccoVeille/typos
- 85d8bb6 fix typos in comments, tests and github templates
- e2741fa Merge pull request #1548 from arjunmahishi/msgAndArgs
- 6e59f20 http_assertions: assert that the msgAndArgs actually works in tests
- Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/stretchr/testify&package-manager=go_modules&previous-version=1.8.4&new-version=1.9.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
**Dependabot commands and options**
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index dc01266cb..49521f64c 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // MIT github.com/spf13/cobra v1.8.0 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause - github.com/stretchr/testify v1.8.4 // MIT + github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/mod v0.15.0 golang.org/x/oauth2 v0.17.0 @@ -51,7 +51,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect diff --git a/go.sum b/go.sum index bbab6fc34..4b273bc3c 100644 --- a/go.sum +++ b/go.sum @@ -144,14 +144,15 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= From 29ab96f3276eb9d7cc3fcefb79bd96c8e838efcd Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 4 Mar 2024 13:34:03 +0100 Subject: [PATCH 062/286] Only transform wheel libraries when using trampoline (#1248) ## Changes Only transform wheel libraries when using trampoline ## Tests Added regression test --- bundle/python/conditional_transform_test.go | 4 +++- bundle/python/transform.go | 19 +++++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/bundle/python/conditional_transform_test.go b/bundle/python/conditional_transform_test.go index 5bf337216..4c7cad5c5 100644 --- a/bundle/python/conditional_transform_test.go +++ 
b/bundle/python/conditional_transform_test.go @@ -81,6 +81,7 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { }, Libraries: []compute.Library{ {Whl: "/Workspace/Users/test@test.com/bundle/dist/test.whl"}, + {Jar: "/Workspace/Users/test@test.com/bundle/dist/test.jar"}, }, }, }, @@ -110,5 +111,6 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { require.Equal(t, path.Join(filepath.ToSlash(internalDirRel), "notebook_job1_key1"), task.NotebookTask.NotebookPath) - require.Empty(t, task.Libraries) + require.Len(t, task.Libraries, 1) + require.Equal(t, "/Workspace/Users/test@test.com/bundle/dist/test.jar", task.Libraries[0].Jar) } diff --git a/bundle/python/transform.go b/bundle/python/transform.go index a3fea2e87..728d4e83d 100644 --- a/bundle/python/transform.go +++ b/bundle/python/transform.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -79,7 +80,14 @@ type pythonTrampoline struct{} func (t *pythonTrampoline) CleanUp(task *jobs.Task) error { task.PythonWheelTask = nil - task.Libraries = nil + + nonWheelLibraries := make([]compute.Library, 0) + for _, l := range task.Libraries { + if l.Whl == "" { + nonWheelLibraries = append(nonWheelLibraries, l) + } + } + task.Libraries = nonWheelLibraries return nil } @@ -115,12 +123,19 @@ func needsTrampoline(task *jobs.Task) bool { func (t *pythonTrampoline) GetTemplateData(task *jobs.Task) (map[string]any, error) { params, err := t.generateParameters(task.PythonWheelTask) + whlLibraries := make([]compute.Library, 0) + for _, l := range task.Libraries { + if l.Whl != "" { + whlLibraries = append(whlLibraries, l) + } + } + if err != nil { return nil, err } data := map[string]any{ - "Libraries": task.Libraries, + "Libraries": whlLibraries, "Params": params, "Task": task.PythonWheelTask, } From 09d1846e13a5ca875d943609c268d95da3c37b1b Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 4 Mar 2024 17:12:10 +0100 Subject: [PATCH 063/286] Return `application_id` for service principal lookups (#1245) ## Changes Return ApplicationId for service principals lookups Fixes #1234 ## Tests Added (regression) tests --- .codegen/lookup.go.tmpl | 12 ++++---- .../resolve_resource_references_test.go | 28 +++++++++++++++++++ bundle/config/variable/lookup.go | 2 +- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/.codegen/lookup.go.tmpl b/.codegen/lookup.go.tmpl index a982f151a..7e643a90c 100644 --- a/.codegen/lookup.go.tmpl +++ b/.codegen/lookup.go.tmpl @@ -18,6 +18,11 @@ package variable "warehouses" }} +{{ $customField := + dict + "service-principals" "ApplicationId" +}} + import ( "context" "fmt" @@ -116,15 +121,10 @@ func allResolvers() *resolvers { return "", err } - return fmt.Sprint(entity{{ template "field-path" .List.NamedIdMap.IdPath }}), nil + return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .List.NamedIdMap.IdPath 0).PascalName) }}), nil } {{end -}} {{- end}} return r } - - -{{- define "field-path" -}} - {{- range .}}.{{.PascalName}}{{end}} -{{- end -}} diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index 4d51285c6..5f5dab316 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -13,6 +13,7 
@@ import ( "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/iam" ) func TestResolveClusterReference(t *testing.T) { @@ -105,3 +106,30 @@ func TestNoLookupIfVariableIsSet(t *testing.T) { require.NoError(t, err) require.Equal(t, "random value", *b.Config.Variables["my-cluster-id"].Value) } + +func TestResolveServicePrincipal(t *testing.T) { + spName := "Some SP name" + b := &bundle.Bundle{ + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "my-sp": { + Lookup: &variable.Lookup{ + ServicePrincipal: spName, + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + spApi := m.GetMockServicePrincipalsAPI() + spApi.EXPECT().GetByDisplayName(mock.Anything, spName).Return(&iam.ServicePrincipal{ + Id: "1234", + ApplicationId: "app-1234", + }, nil) + + err := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.NoError(t, err) + require.Equal(t, "app-1234", *b.Config.Variables["my-sp"].Value) +} diff --git a/bundle/config/variable/lookup.go b/bundle/config/variable/lookup.go index 3b29783eb..56d2ca810 100755 --- a/bundle/config/variable/lookup.go +++ b/bundle/config/variable/lookup.go @@ -297,7 +297,7 @@ func allResolvers() *resolvers { return "", err } - return fmt.Sprint(entity.Id), nil + return fmt.Sprint(entity.ApplicationId), nil } r.Warehouse = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { entity, err := w.Warehouses.GetByName(ctx, name) From ecf9c52f6141e7d97d49c8c1c3fb995ea4a1289f Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 4 Mar 2024 21:28:15 +0100 Subject: [PATCH 064/286] Support relative paths in artifact files source section and always upload all artifact files (#1247) Support relative paths in artifact files source section and always upload all artifact files Fixes #1156 ## Tests Added unit tests --- bundle/artifacts/artifacts.go | 14 ++--- bundle/artifacts/build.go | 3 +- bundle/artifacts/upload.go | 31 +++++++++++ bundle/artifacts/upload_test.go | 98 +++++++++++++++++++++++++++++++++ 4 files changed, 138 insertions(+), 8 deletions(-) create mode 100644 bundle/artifacts/upload_test.go diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index e474240de..ce2e165b7 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -121,13 +121,6 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u for i := range a.Files { f := &a.Files[i] - // Lookup all tasks that reference this file. - libs, ok := filesToLibraries[f.Source] - if !ok { - log.Debugf(ctx, "No tasks reference %s. Skipping upload.", f.Source) - continue - } - filename := filepath.Base(f.Source) cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename)) @@ -139,6 +132,13 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u log.Infof(ctx, "Upload succeeded") f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) + // Lookup all tasks that reference this file. + libs, ok := filesToLibraries[f.Source] + if !ok { + log.Debugf(ctx, "No tasks reference %s", f.Source) + continue + } + // Update all tasks that reference this file. 
for _, lib := range libs { wsfsBase := "/Workspace" diff --git a/bundle/artifacts/build.go b/bundle/artifacts/build.go index 6b1aac822..a78958e60 100644 --- a/bundle/artifacts/build.go +++ b/bundle/artifacts/build.go @@ -49,7 +49,8 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { } if !filepath.IsAbs(artifact.Path) { - artifact.Path = filepath.Join(b.Config.Path, artifact.Path) + dirPath := filepath.Dir(artifact.ConfigFilePath) + artifact.Path = filepath.Join(dirPath, artifact.Path) } return bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name)) diff --git a/bundle/artifacts/upload.go b/bundle/artifacts/upload.go index 990718aa4..61e652086 100644 --- a/bundle/artifacts/upload.go +++ b/bundle/artifacts/upload.go @@ -3,8 +3,10 @@ package artifacts import ( "context" "fmt" + "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -41,6 +43,35 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { return fmt.Errorf("artifact source is not configured: %s", m.name) } + // Check if source paths are absolute, if not, make them absolute + for k := range artifact.Files { + f := &artifact.Files[k] + if !filepath.IsAbs(f.Source) { + dirPath := filepath.Dir(artifact.ConfigFilePath) + f.Source = filepath.Join(dirPath, f.Source) + } + } + + // Expand any glob reference in files source path + files := make([]config.ArtifactFile, 0, len(artifact.Files)) + for _, f := range artifact.Files { + matches, err := filepath.Glob(f.Source) + if err != nil { + return fmt.Errorf("unable to find files for %s: %w", f.Source, err) + } + + if len(matches) == 0 { + return fmt.Errorf("no files found for %s", f.Source) + } + + for _, match := range matches { + files = append(files, config.ArtifactFile{ + Source: match, + }) + } + } + + artifact.Files = files return bundle.Apply(ctx, b, getUploadMutator(artifact.Type, m.name)) } diff --git a/bundle/artifacts/upload_test.go b/bundle/artifacts/upload_test.go new file mode 100644 index 000000000..6dea1c145 --- /dev/null +++ b/bundle/artifacts/upload_test.go @@ -0,0 +1,98 @@ +package artifacts + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/testfile" + "github.com/stretchr/testify/require" +) + +type noop struct{} + +func (n *noop) Apply(context.Context, *bundle.Bundle) error { + return nil +} + +func (n *noop) Name() string { + return "noop" +} + +func TestExpandGlobFilesSource(t *testing.T) { + rootPath := t.TempDir() + err := os.Mkdir(filepath.Join(rootPath, "test"), 0755) + require.NoError(t, err) + + t1 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar1.jar")) + t1.Close(t) + + t2 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar2.jar")) + t2.Close(t) + + b := &bundle.Bundle{ + Config: config.Root{ + Path: rootPath, + Artifacts: map[string]*config.Artifact{ + "test": { + Type: "custom", + Files: []config.ArtifactFile{ + { + Source: filepath.Join("..", "test", "*.jar"), + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml")) + + u := &upload{"test"} + uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { + return &noop{} + } + + err = bundle.Apply(context.Background(), b, u) + require.NoError(t, err) + + require.Equal(t, 2, 
len(b.Config.Artifacts["test"].Files)) + require.Equal(t, filepath.Join(rootPath, "test", "myjar1.jar"), b.Config.Artifacts["test"].Files[0].Source) + require.Equal(t, filepath.Join(rootPath, "test", "myjar2.jar"), b.Config.Artifacts["test"].Files[1].Source) +} + +func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) { + rootPath := t.TempDir() + err := os.Mkdir(filepath.Join(rootPath, "test"), 0755) + require.NoError(t, err) + + b := &bundle.Bundle{ + Config: config.Root{ + Path: rootPath, + Artifacts: map[string]*config.Artifact{ + "test": { + Type: "custom", + Files: []config.ArtifactFile{ + { + Source: filepath.Join("..", "test", "myjar.jar"), + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml")) + + u := &upload{"test"} + uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { + return &noop{} + } + + err = bundle.Apply(context.Background(), b, u) + require.ErrorContains(t, err, "no files found for") +} From e61f0e1eb93361d2553037d3616540ecd006f4ca Mon Sep 17 00:00:00 2001 From: Fabian Jakobs Date: Tue, 5 Mar 2024 15:31:27 +0100 Subject: [PATCH 065/286] Fix DBConnect support in VS Code (#1253) ## Changes With the current template, we can't execute the Python file and the jobs notebook using DBConnect from VSCode because we import `from pyspark.sql import SparkSession`, which doesn't support Databricks unified auth. This PR fixes this by passing spark into the library code and by explicitly instantiating a spark session where the spark global is not available. Other changes: * add auto-reload to notebooks * add DLT typings for code completion --- .../requirements-dev.txt.tmpl | 3 +++ .../scratch/exploration.ipynb.tmpl | 12 +++++++++- .../src/dlt_pipeline.ipynb.tmpl | 2 +- .../{{.project_name}}/src/notebook.ipynb.tmpl | 12 +++++++++- .../src/{{.project_name}}/main.py.tmpl | 23 +++++++++++-------- .../{{.project_name}}/tests/main_test.py.tmpl | 19 ++------------- 6 files changed, 42 insertions(+), 29 deletions(-) diff --git a/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl index 6da403219..93dd4c480 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl @@ -3,6 +3,9 @@ ## For defining dependencies used by jobs in Databricks Workflows, see ## https://docs.databricks.com/dev-tools/bundles/library-dependencies.html +## Add code completion support for DLT +databricks-dlt + ## pytest is the default package used for testing pytest diff --git a/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl index 04bb261cd..42164dff0 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl @@ -1,5 +1,15 @@ { "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, { "cell_type": "code", "execution_count": null, @@ -22,7 +32,7 @@ "sys.path.append('../src')\n", "from {{.project_name}} import main\n", "\n", - 
"main.get_taxis().show(10)" + "main.get_taxis(spark).show(10)" {{else}} "spark.range(10)" {{end -}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl index 4f50294f6..b152e9a30 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl @@ -63,7 +63,7 @@ {{- if (eq .include_python "yes") }} "@dlt.view\n", "def taxi_raw():\n", - " return main.get_taxis()\n", + " return main.get_taxis(spark)\n", {{else}} "\n", "@dlt.view\n", diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl index 0ab61db2c..a228f8d18 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl @@ -17,6 +17,16 @@ "This default notebook is executed using Databricks Workflows as defined in resources/{{.project_name}}_job.yml." ] }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, { "cell_type": "code", "execution_count": 0, @@ -37,7 +47,7 @@ {{- if (eq .include_python "yes") }} "from {{.project_name}} import main\n", "\n", - "main.get_taxis().show(10)" + "main.get_taxis(spark).show(10)" {{else}} "spark.range(10)" {{end -}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl index 4fe5ac8f4..c514c6dc5 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl @@ -1,16 +1,21 @@ -{{- /* -We use pyspark.sql rather than DatabricksSession.builder.getOrCreate() -for compatibility with older runtimes. With a new runtime, it's -equivalent to DatabricksSession.builder.getOrCreate(). -*/ -}} -from pyspark.sql import SparkSession +from pyspark.sql import SparkSession, DataFrame -def get_taxis(): - spark = SparkSession.builder.getOrCreate() +def get_taxis(spark: SparkSession) -> DataFrame: return spark.read.table("samples.nyctaxi.trips") + +# Create a new Databricks Connect session. If this fails, +# check that you have configured Databricks Connect correctly. +# See https://docs.databricks.com/dev-tools/databricks-connect.html. 
+def get_spark() -> SparkSession: + try: + from databricks.connect import DatabricksSession + return DatabricksSession.builder.getOrCreate() + except ImportError: + return SparkSession.builder.getOrCreate() + def main(): - get_taxis().show(5) + get_taxis(get_spark()).show(5) if __name__ == '__main__': main() diff --git a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl index a7a6afe0a..fea2f3f66 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl @@ -1,21 +1,6 @@ -from databricks.connect import DatabricksSession -from pyspark.sql import SparkSession -from {{.project_name}} import main +from {{.project_name}}.main import get_taxis, get_spark -# Create a new Databricks Connect session. If this fails, -# check that you have configured Databricks Connect correctly. -# See https://docs.databricks.com/dev-tools/databricks-connect.html. -{{/* - The below works around a problematic error message from Databricks Connect. - The standard SparkSession is supported in all configurations (workspace, IDE, - all runtime versions, CLI). But on the CLI it currently gives a confusing - error message if SPARK_REMOTE is not set. We can't directly use - DatabricksSession.builder in main.py, so we're re-assigning it here so - everything works out of the box, even for CLI users who don't set SPARK_REMOTE. -*/}} -SparkSession.builder = DatabricksSession.builder -SparkSession.builder.getOrCreate() def test_main(): - taxis = main.get_taxis() + taxis = get_taxis(get_spark()) assert taxis.count() > 5 From 74b1e05ed74a3741be9491f881d1f0488c59bd0d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 6 Mar 2024 10:53:44 +0100 Subject: [PATCH 066/286] Update Go SDK to v0.34.0 (#1256) ## Changes SDK release https://github.com/databricks/databricks-sdk-go/releases/tag/v0.34.0 This incorporates two changes to the generation code: * Use explicit empty check for response types (see https://github.com/databricks/databricks-sdk-go/pull/831) * Support subservices for the settings commands (see https://github.com/databricks/databricks-sdk-go/pull/826) As part of the subservices support, this change also updates how methods are registered with their services. This used to be done with `init` functions and now through inline function calls. This should have a (negligible) positive impact on binary start time because we no longer have to call as many `init` functions. 
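To make the registration change concrete, here is a minimal, hypothetical sketch of the two styles; the `jobs` service name and the `newList` helper are placeholders for illustration, not the actual generated code:

```go
package example

import "github.com/spf13/cobra"

// Optional overrides appended from manually curated files in the same directory.
var cmdOverrides []func(*cobra.Command)

// newList stands in for a generated per-method command constructor.
func newList() *cobra.Command { return &cobra.Command{Use: "list"} }

// Previously, every generated method file registered itself lazily:
//
//	func init() {
//		cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
//			cmd.AddCommand(newList())
//		})
//	}
//
// Now the service constructor wires up its methods (and any subservices)
// with inline calls, so far fewer init functions run at program start.
func New() *cobra.Command {
	cmd := &cobra.Command{Use: "jobs", Short: "Jobs commands"}

	// Add methods directly.
	cmd.AddCommand(newList())

	// Apply optional overrides to this command.
	for _, fn := range cmdOverrides {
		fn(cmd)
	}
	return cmd
}
```

The generated `New()` for each service now follows this shape, and subservices are attached the same way via `cmd.AddCommand(<subservice>.New())`.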
## Tests tbd --- .codegen/_openapi_sha | 2 +- .codegen/cmds-account.go.tmpl | 4 +- .codegen/cmds-workspace.go.tmpl | 4 +- .codegen/service.go.tmpl | 49 +- .gitattributes | 9 + bundle/schema/docs/bundle_descriptions.json | 52 +-- cmd/account/access-control/access-control.go | 23 +- cmd/account/billable-usage/billable-usage.go | 9 +- cmd/account/budgets/budgets.go | 37 +- cmd/account/credentials/credentials.go | 30 +- .../csp-enablement-account.go | 159 +++++++ .../custom-app-integration.go | 37 +- .../encryption-keys/encryption-keys.go | 30 +- .../esm-enablement-account.go | 157 +++++++ cmd/account/groups/groups.go | 44 +- .../ip-access-lists/ip-access-lists.go | 44 +- cmd/account/log-delivery/log-delivery.go | 30 +- .../metastore-assignments.go | 37 +- cmd/account/metastores/metastores.go | 37 +- .../network-connectivity.go | 86 +--- cmd/account/networks/networks.go | 30 +- .../o-auth-published-apps.go | 9 +- .../personal-compute/personal-compute.go | 219 +++++++++ cmd/account/private-access/private-access.go | 37 +- .../published-app-integration.go | 37 +- .../service-principal-secrets.go | 23 +- .../service-principals/service-principals.go | 44 +- cmd/account/settings/settings.go | 219 +-------- .../storage-credentials.go | 37 +- cmd/account/storage/storage.go | 30 +- cmd/account/users/users.go | 44 +- cmd/account/vpc-endpoints/vpc-endpoints.go | 30 +- .../workspace-assignment.go | 30 +- cmd/account/workspaces/workspaces.go | 96 ++-- cmd/workspace/alerts/alerts.go | 37 +- cmd/workspace/apps/apps.go | 46 +- .../artifact-allowlists.go | 16 +- .../automatic-cluster-update.go | 157 +++++++ cmd/workspace/catalogs/catalogs.go | 37 +- cmd/workspace/clean-rooms/clean-rooms.go | 37 +- .../cluster-policies/cluster-policies.go | 65 +-- cmd/workspace/clusters/clusters.go | 142 +----- cmd/workspace/cmd.go | 2 + cmd/workspace/connections/connections.go | 37 +- .../credentials-manager.go | 9 +- .../csp-enablement/csp-enablement.go | 160 +++++++ cmd/workspace/current-user/current-user.go | 9 +- .../dashboard-widgets/dashboard-widgets.go | 23 +- cmd/workspace/dashboards/dashboards.go | 44 +- cmd/workspace/data-sources/data-sources.go | 9 +- .../default-namespace/default-namespace.go | 229 ++++++++++ .../esm-enablement/esm-enablement.go | 162 +++++++ cmd/workspace/experiments/experiments.go | 212 ++------- .../external-locations/external-locations.go | 37 +- cmd/workspace/functions/functions.go | 37 +- .../git-credentials/git-credentials.go | 37 +- .../global-init-scripts.go | 37 +- cmd/workspace/grants/grants.go | 23 +- cmd/workspace/groups/groups.go | 44 +- .../instance-pools/instance-pools.go | 65 +-- .../instance-profiles/instance-profiles.go | 30 +- .../ip-access-lists/ip-access-lists.go | 44 +- cmd/workspace/jobs/jobs.go | 142 +----- .../lakehouse-monitors/lakehouse-monitors.go | 62 +-- cmd/workspace/lakeview/lakeview.go | 9 +- cmd/workspace/libraries/libraries.go | 30 +- cmd/workspace/metastores/metastores.go | 72 +-- .../model-registry/model-registry.go | 254 ++--------- .../model-versions/model-versions.go | 37 +- cmd/workspace/online-tables/online-tables.go | 23 +- .../permission-migration.go | 136 ++++++ cmd/workspace/permissions/permissions.go | 30 +- cmd/workspace/pipelines/pipelines.go | 100 +--- .../policy-families/policy-families.go | 16 +- cmd/workspace/providers/providers.go | 44 +- cmd/workspace/queries/queries.go | 44 +- cmd/workspace/query-history/query-history.go | 9 +- .../query-visualizations.go | 23 +- .../recipient-activation.go | 16 +- cmd/workspace/recipients/recipients.go | 51 
+-- .../registered-models/registered-models.go | 51 +-- cmd/workspace/repos/repos.go | 65 +-- .../restrict-workspace-admins.go | 227 ++++++++++ cmd/workspace/schemas/schemas.go | 37 +- cmd/workspace/secrets/secrets.go | 72 +-- .../service-principals/service-principals.go | 44 +- .../serving-endpoints/serving-endpoints.go | 107 +---- cmd/workspace/settings/settings.go | 428 +----------------- cmd/workspace/shares/shares.go | 51 +-- .../storage-credentials.go | 52 +-- .../system-schemas/system-schemas.go | 23 +- .../table-constraints/table-constraints.go | 16 +- cmd/workspace/tables/tables.go | 44 +- .../token-management/token-management.go | 58 +-- cmd/workspace/tokens/tokens.go | 23 +- cmd/workspace/users/users.go | 72 +-- .../vector-search-endpoints.go | 38 +- .../vector-search-indexes.go | 89 ++-- cmd/workspace/volumes/volumes.go | 37 +- cmd/workspace/warehouses/warehouses.go | 93 +--- .../workspace-bindings/workspace-bindings.go | 30 +- .../workspace-conf/workspace-conf.go | 16 +- cmd/workspace/workspace/workspace.go | 72 +-- go.mod | 20 +- go.sum | 52 +-- 105 files changed, 2512 insertions(+), 3955 deletions(-) create mode 100755 cmd/account/csp-enablement-account/csp-enablement-account.go create mode 100755 cmd/account/esm-enablement-account/esm-enablement-account.go create mode 100755 cmd/account/personal-compute/personal-compute.go create mode 100755 cmd/workspace/automatic-cluster-update/automatic-cluster-update.go create mode 100755 cmd/workspace/csp-enablement/csp-enablement.go create mode 100755 cmd/workspace/default-namespace/default-namespace.go create mode 100755 cmd/workspace/esm-enablement/esm-enablement.go create mode 100755 cmd/workspace/permission-migration/permission-migration.go create mode 100755 cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 013e5ffe8..fb91589e9 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -cdd76a98a4fca7008572b3a94427566dd286c63b \ No newline at end of file +d855b30f25a06fe84f25214efa20e7f1fffcdf9e \ No newline at end of file diff --git a/.codegen/cmds-account.go.tmpl b/.codegen/cmds-account.go.tmpl index f3da7e2c8..24b6bdd7c 100644 --- a/.codegen/cmds-account.go.tmpl +++ b/.codegen/cmds-account.go.tmpl @@ -7,7 +7,7 @@ package account import ( "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" - {{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) }} + {{range .Services}}{{if and .IsAccounts (not .HasParent)}}{{if not (in $excludes .KebabName) }} {{.SnakeName}} "github.com/databricks/cli/cmd/account/{{(.TrimPrefix "account").KebabName}}"{{end}}{{end}}{{end}} ) @@ -17,7 +17,7 @@ func New() *cobra.Command { Short: `Databricks Account Commands`, } - {{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}} + {{range .Services}}{{if and .IsAccounts (not .HasParent)}}{{if not (in $excludes .KebabName) -}} cmd.AddCommand({{.SnakeName}}.New()) {{end}}{{end}}{{end}} diff --git a/.codegen/cmds-workspace.go.tmpl b/.codegen/cmds-workspace.go.tmpl index a9daa05d8..244dde61a 100644 --- a/.codegen/cmds-workspace.go.tmpl +++ b/.codegen/cmds-workspace.go.tmpl @@ -14,14 +14,14 @@ package workspace import ( "github.com/databricks/cli/cmd/root" - {{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) }} + {{range .Services}}{{if and (not .IsAccounts) (not .HasParent)}}{{if not (in $excludes .KebabName) }} {{.SnakeName}} 
"github.com/databricks/cli/cmd/workspace/{{.KebabName}}"{{end}}{{end}}{{end}} ) func All() []*cobra.Command { var out []*cobra.Command - {{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}} + {{range .Services}}{{if and (not .IsAccounts) (not .HasParent)}}{{if not (in $excludes .KebabName) -}} out = append(out, {{.SnakeName}}.New()) {{end}}{{end}}{{end}} diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index ad25135ae..0665b661f 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -8,6 +8,10 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}" "github.com/spf13/cobra" + + {{range .Subservices -}} + {{.SnakeName}} "github.com/databricks/cli/cmd/{{ if .ParentService.IsAccounts }}account{{ else }}workspace{{ end }}/{{.KebabName}}" + {{end}} ) {{ $excludes := @@ -34,6 +38,8 @@ import ( ]{{end}}{{end}} {{define "service"}} +{{- $excludeMethods := list "put-secret" -}} + // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. var cmdOverrides []func(*cobra.Command) @@ -45,10 +51,12 @@ func New() *cobra.Command { Short: `{{.Summary | without "`"}}`, Long: `{{.Comment " " 80 | without "`"}}`, {{- end }} + {{- if not .HasParent }} GroupID: "{{ .Package.Name }}", Annotations: map[string]string{ "package": "{{ .Package.Name }}", }, + {{- end }} {{- if .IsPrivatePreview }} // This service is being previewed; hide from help output. @@ -56,6 +64,23 @@ func New() *cobra.Command { {{- end }} } + {{ if gt (len .Methods) 0 -}} + // Add methods + {{- range .Methods}} + {{- if in $excludeMethods .KebabName }} + {{- continue}} + {{- end}} + cmd.AddCommand(new{{.PascalName}}()) + {{- end}} + {{- end}} + + {{ if .HasSubservices }} + // Add subservices + {{- range .Subservices}} + cmd.AddCommand({{.SnakeName}}.New()) + {{- end}} + {{- end}} + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -67,8 +92,7 @@ func New() *cobra.Command { {{- $serviceName := .KebabName -}} {{range .Methods}} -{{- $excludes := list "put-secret" -}} -{{if in $excludes .KebabName }} +{{if in $excludeMethods .KebabName }} {{continue}} {{end}} // start {{.KebabName}} command @@ -242,7 +266,7 @@ func new{{.PascalName}}() *cobra.Command { return err } if {{.CamelName}}SkipWait { - {{if .Response -}} + {{if not .Response.IsEmpty -}} return cmdio.Render(ctx, wait.Response) {{- else -}} return nil @@ -291,26 +315,29 @@ func new{{.PascalName}}() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(new{{.PascalName}}()) - }) -} {{end}} // end service {{.Name}}{{end}} {{- define "method-call" -}} - {{if .Response -}} + {{if not .Response.IsEmpty -}} response{{ if not .Pagination}}, err{{end}} := {{- else -}} err = - {{- end}} {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}}) + {{- end}} + {{- if .Service.IsAccounts}}a{{else}}w{{end}}. + {{- if .Service.HasParent }} + {{- (.Service.ParentService.TrimPrefix "account").PascalName }}. + {{- (.Service.TrimPrefix "account").PascalName}}(). + {{- else}} + {{- (.Service.TrimPrefix "account").PascalName}}. 
+ {{- end}} + {{- .PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}}) {{- if not (and .Response .Pagination) }} if err != nil { return err } {{- end}} - {{ if .Response -}} + {{ if not .Response.IsEmpty -}} {{- if .IsResponseByteStream -}} defer response.{{.ResponseBodyField.PascalName}}.Close() return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response.{{.ResponseBodyField.PascalName}}) diff --git a/.gitattributes b/.gitattributes index 09aac5e75..c7d605130 100755 --- a/.gitattributes +++ b/.gitattributes @@ -4,8 +4,10 @@ cmd/account/billable-usage/billable-usage.go linguist-generated=true cmd/account/budgets/budgets.go linguist-generated=true cmd/account/cmd.go linguist-generated=true cmd/account/credentials/credentials.go linguist-generated=true +cmd/account/csp-enablement-account/csp-enablement-account.go linguist-generated=true cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true cmd/account/encryption-keys/encryption-keys.go linguist-generated=true +cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true cmd/account/groups/groups.go linguist-generated=true cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true @@ -14,6 +16,7 @@ cmd/account/metastores/metastores.go linguist-generated=true cmd/account/network-connectivity/network-connectivity.go linguist-generated=true cmd/account/networks/networks.go linguist-generated=true cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true +cmd/account/personal-compute/personal-compute.go linguist-generated=true cmd/account/private-access/private-access.go linguist-generated=true cmd/account/published-app-integration/published-app-integration.go linguist-generated=true cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true @@ -28,6 +31,7 @@ cmd/account/workspaces/workspaces.go linguist-generated=true cmd/workspace/alerts/alerts.go linguist-generated=true cmd/workspace/apps/apps.go linguist-generated=true cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true +cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true @@ -35,10 +39,13 @@ cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true cmd/workspace/connections/connections.go linguist-generated=true cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true +cmd/workspace/csp-enablement/csp-enablement.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true +cmd/workspace/default-namespace/default-namespace.go linguist-generated=true +cmd/workspace/esm-enablement/esm-enablement.go linguist-generated=true cmd/workspace/experiments/experiments.go linguist-generated=true cmd/workspace/external-locations/external-locations.go linguist-generated=true cmd/workspace/functions/functions.go linguist-generated=true @@ -57,6 +64,7 @@ cmd/workspace/metastores/metastores.go linguist-generated=true 
cmd/workspace/model-registry/model-registry.go linguist-generated=true cmd/workspace/model-versions/model-versions.go linguist-generated=true cmd/workspace/online-tables/online-tables.go linguist-generated=true +cmd/workspace/permission-migration/permission-migration.go linguist-generated=true cmd/workspace/permissions/permissions.go linguist-generated=true cmd/workspace/pipelines/pipelines.go linguist-generated=true cmd/workspace/policy-families/policy-families.go linguist-generated=true @@ -68,6 +76,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=tr cmd/workspace/recipients/recipients.go linguist-generated=true cmd/workspace/registered-models/registered-models.go linguist-generated=true cmd/workspace/repos/repos.go linguist-generated=true +cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true cmd/workspace/schemas/schemas.go linguist-generated=true cmd/workspace/secrets/secrets.go linguist-generated=true cmd/workspace/service-principals/service-principals.go linguist-generated=true diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 982dd4eb7..494c8c752 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -193,7 +193,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." } } }, @@ -351,13 +351,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -725,7 +725,7 @@ "description": "An optional periodic schedule for this job. 
The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -785,7 +785,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -959,13 +959,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." 
}, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -1269,7 +1269,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -1371,7 +1371,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -1449,7 +1449,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -1551,7 +1551,7 @@ } }, "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." }, "table": { "description": "Table trigger settings.", @@ -2061,13 +2061,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -2726,7 +2726,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." } } }, @@ -2884,13 +2884,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. 
Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -3258,7 +3258,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -3318,7 +3318,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. 
If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3492,13 +3492,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -3802,7 +3802,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -3904,7 +3904,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. 
For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -3982,7 +3982,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -4084,7 +4084,7 @@ } }, "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." }, "table": { "description": "Table trigger settings.", @@ -4594,13 +4594,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." 
}, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" diff --git a/cmd/account/access-control/access-control.go b/cmd/account/access-control/access-control.go index 36b69d01d..76ad4b51f 100755 --- a/cmd/account/access-control/access-control.go +++ b/cmd/account/access-control/access-control.go @@ -29,6 +29,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGetAssignableRolesForResource()) + cmd.AddCommand(newGetRuleSet()) + cmd.AddCommand(newUpdateRuleSet()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -97,12 +102,6 @@ func newGetAssignableRolesForResource() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetAssignableRolesForResource()) - }) -} - // start get-rule-set command // Slice with functions to override default command behavior. @@ -172,12 +171,6 @@ func newGetRuleSet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRuleSet()) - }) -} - // start update-rule-set command // Slice with functions to override default command behavior. @@ -239,10 +232,4 @@ func newUpdateRuleSet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateRuleSet()) - }) -} - // end service AccountAccessControl diff --git a/cmd/account/billable-usage/billable-usage.go b/cmd/account/billable-usage/billable-usage.go index bbbc9af23..d8d36bacc 100755 --- a/cmd/account/billable-usage/billable-usage.go +++ b/cmd/account/billable-usage/billable-usage.go @@ -25,6 +25,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDownload()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -107,10 +110,4 @@ func newDownload() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDownload()) - }) -} - // end service BillableUsage diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index dfa2f6bc4..e6f87a953 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -31,6 +31,13 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -98,12 +105,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -174,12 +175,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -251,12 +246,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -297,12 +286,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -372,10 +355,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Budgets diff --git a/cmd/account/credentials/credentials.go b/cmd/account/credentials/credentials.go index 72fcd70bd..ed071cda3 100755 --- a/cmd/account/credentials/credentials.go +++ b/cmd/account/credentials/credentials.go @@ -31,6 +31,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -111,12 +117,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -189,12 +189,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -266,12 +260,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -315,10 +303,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service Credentials diff --git a/cmd/account/csp-enablement-account/csp-enablement-account.go b/cmd/account/csp-enablement-account/csp-enablement-account.go new file mode 100755 index 000000000..ca2170fad --- /dev/null +++ b/cmd/account/csp-enablement-account/csp-enablement-account.go @@ -0,0 +1,159 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package csp_enablement_account + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "csp-enablement-account", + Short: `The compliance security profile settings at the account level control whether to enable it for new workspaces.`, + Long: `The compliance security profile settings at the account level control whether + to enable it for new workspaces. By default, this account-level setting is + disabled for new workspaces. After workspace creation, account admins can + enable the compliance security profile individually for each workspace. + + This settings can be disabled so that new workspaces do not have compliance + security profile enabled by default.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetCspEnablementAccountRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetCspEnablementAccountRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the compliance security profile setting for new workspaces.` + cmd.Long = `Get the compliance security profile setting for new workspaces. + + Gets the compliance security profile setting for new workspaces.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.Settings.CspEnablementAccount().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateCspEnablementAccountSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateCspEnablementAccountSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the compliance security profile setting for new workspaces.` + cmd.Long = `Update the compliance security profile setting for new workspaces. 
+ + Updates the value of the compliance security profile setting for new + workspaces.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := a.Settings.CspEnablementAccount().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CSPEnablementAccount diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index 79c0f8373..79dd50c1f 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -29,6 +29,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -103,12 +110,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -168,12 +169,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -232,12 +227,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -278,12 +267,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -354,10 +337,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service CustomAppIntegration diff --git a/cmd/account/encryption-keys/encryption-keys.go b/cmd/account/encryption-keys/encryption-keys.go index 3977f5837..c82f385ed 100755 --- a/cmd/account/encryption-keys/encryption-keys.go +++ b/cmd/account/encryption-keys/encryption-keys.go @@ -42,6 +42,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -128,12 +134,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -193,12 +193,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -271,12 +265,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -331,10 +319,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service EncryptionKeys diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go new file mode 100755 index 000000000..fc793d60a --- /dev/null +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -0,0 +1,157 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package esm_enablement_account + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "esm-enablement-account", + Short: `The enhanced security monitoring setting at the account level controls whether to enable the feature on new workspaces.`, + Long: `The enhanced security monitoring setting at the account level controls whether + to enable the feature on new workspaces. By default, this account-level + setting is disabled for new workspaces. After workspace creation, account + admins can enable enhanced security monitoring individually for each + workspace.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetEsmEnablementAccountRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetEsmEnablementAccountRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the enhanced security monitoring setting for new workspaces.` + cmd.Long = `Get the enhanced security monitoring setting for new workspaces. 
+ + Gets the enhanced security monitoring setting for new workspaces.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.Settings.EsmEnablementAccount().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateEsmEnablementAccountSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateEsmEnablementAccountSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the enhanced security monitoring setting for new workspaces.` + cmd.Long = `Update the enhanced security monitoring setting for new workspaces. + + Updates the value of the enhanced security monitoring setting for new + workspaces.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := a.Settings.EsmEnablementAccount().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ESMEnablementAccount diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index a068fba45..68ae1b2af 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -33,6 +33,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -114,12 +122,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -190,12 +192,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -266,12 +262,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -330,12 +320,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -417,12 +401,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -511,10 +489,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountGroups diff --git a/cmd/account/ip-access-lists/ip-access-lists.go b/cmd/account/ip-access-lists/ip-access-lists.go index dd836c90a..364c5a919 100755 --- a/cmd/account/ip-access-lists/ip-access-lists.go +++ b/cmd/account/ip-access-lists/ip-access-lists.go @@ -48,6 +48,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newReplace()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -158,12 +166,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -234,12 +236,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -310,12 +306,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -355,12 +345,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start replace command // Slice with functions to override default command behavior. @@ -468,12 +452,6 @@ func newReplace() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReplace()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -570,10 +548,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountIpAccessLists diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index eed8942b8..f51573e9f 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -85,6 +85,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatchStatus()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -181,12 +187,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -258,12 +258,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -319,12 +313,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch-status command // Slice with functions to override default command behavior. @@ -413,10 +401,4 @@ func newPatchStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatchStatus()) - }) -} - // end service LogDelivery diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index b1d0508b3..013d25cff 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -27,6 +27,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -108,12 +115,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -178,12 +179,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -248,12 +243,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -310,12 +299,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -390,10 +373,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountMetastoreAssignments diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index e8b7c8f70..bcccff812 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -26,6 +26,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -98,12 +105,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -164,12 +165,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -228,12 +223,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -273,12 +262,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -347,10 +330,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountMetastores diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index bfe116f28..fbde0694e 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -21,20 +21,23 @@ func New() *cobra.Command { Use: "network-connectivity", Short: `These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources.`, Long: `These APIs provide configurations for the network connectivity of your - workspaces for serverless compute resources. This API provides stable subnets - for your workspace so that you can configure your firewalls on your Azure - Storage accounts to allow access from Databricks. You can also use the API to - provision private endpoints for Databricks to privately connect serverless - compute resources to your Azure resources using Azure Private Link. See - [configure serverless secure connectivity]. 
- - [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security`, + workspaces for serverless compute resources.`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, } + // Add methods + cmd.AddCommand(newCreateNetworkConnectivityConfiguration()) + cmd.AddCommand(newCreatePrivateEndpointRule()) + cmd.AddCommand(newDeleteNetworkConnectivityConfiguration()) + cmd.AddCommand(newDeletePrivateEndpointRule()) + cmd.AddCommand(newGetNetworkConnectivityConfiguration()) + cmd.AddCommand(newGetPrivateEndpointRule()) + cmd.AddCommand(newListNetworkConnectivityConfigurations()) + cmd.AddCommand(newListPrivateEndpointRules()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -64,29 +67,14 @@ func newCreateNetworkConnectivityConfiguration() *cobra.Command { cmd.Use = "create-network-connectivity-configuration NAME REGION" cmd.Short = `Create a network connectivity configuration.` cmd.Long = `Create a network connectivity configuration. - - Creates a network connectivity configuration (NCC), which provides stable - Azure service subnets when accessing your Azure Storage accounts. You can also - use a network connectivity configuration to create Databricks-managed private - endpoints so that Databricks serverless compute resources privately access - your resources. - - **IMPORTANT**: After you create the network connectivity configuration, you - must assign one or more workspaces to the new network connectivity - configuration. You can share one network connectivity configuration with - multiple workspaces from the same Azure region within the same Databricks - account. See [configure serverless secure connectivity]. - - [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security Arguments: NAME: The name of the network connectivity configuration. The name can contain alphanumeric characters, hyphens, and underscores. The length must be between 3 and 30 characters. The name must match the regular expression ^[0-9a-zA-Z-_]{3,30}$. - REGION: The Azure region for this network connectivity configuration. Only - workspaces in the same Azure region can be attached to this network - connectivity configuration.` + REGION: The region for the network connectivity configuration. Only workspaces in + the same region can be attached to the network connectivity configuration.` cmd.Annotations = make(map[string]string) @@ -139,12 +127,6 @@ func newCreateNetworkConnectivityConfiguration() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateNetworkConnectivityConfiguration()) - }) -} - // start create-private-endpoint-rule command // Slice with functions to override default command behavior. @@ -240,12 +222,6 @@ func newCreatePrivateEndpointRule() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreatePrivateEndpointRule()) - }) -} - // start delete-network-connectivity-configuration command // Slice with functions to override default command behavior. 
@@ -304,12 +280,6 @@ func newDeleteNetworkConnectivityConfiguration() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteNetworkConnectivityConfiguration()) - }) -} - // start delete-private-endpoint-rule command // Slice with functions to override default command behavior. @@ -374,12 +344,6 @@ func newDeletePrivateEndpointRule() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeletePrivateEndpointRule()) - }) -} - // start get-network-connectivity-configuration command // Slice with functions to override default command behavior. @@ -438,12 +402,6 @@ func newGetNetworkConnectivityConfiguration() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetNetworkConnectivityConfiguration()) - }) -} - // start get-private-endpoint-rule command // Slice with functions to override default command behavior. @@ -504,12 +462,6 @@ func newGetPrivateEndpointRule() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPrivateEndpointRule()) - }) -} - // start list-network-connectivity-configurations command // Slice with functions to override default command behavior. @@ -562,12 +514,6 @@ func newListNetworkConnectivityConfigurations() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListNetworkConnectivityConfigurations()) - }) -} - // start list-private-endpoint-rules command // Slice with functions to override default command behavior. @@ -625,10 +571,4 @@ func newListPrivateEndpointRules() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListPrivateEndpointRules()) - }) -} - // end service NetworkConnectivity diff --git a/cmd/account/networks/networks.go b/cmd/account/networks/networks.go index 15586bdc9..6dc772973 100755 --- a/cmd/account/networks/networks.go +++ b/cmd/account/networks/networks.go @@ -28,6 +28,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -119,12 +125,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -200,12 +200,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -277,12 +271,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -329,10 +317,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service Networks diff --git a/cmd/account/o-auth-published-apps/o-auth-published-apps.go b/cmd/account/o-auth-published-apps/o-auth-published-apps.go index 1ce363ac9..a9e94e5aa 100755 --- a/cmd/account/o-auth-published-apps/o-auth-published-apps.go +++ b/cmd/account/o-auth-published-apps/o-auth-published-apps.go @@ -27,6 +27,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -88,10 +91,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service OAuthPublishedApps diff --git a/cmd/account/personal-compute/personal-compute.go b/cmd/account/personal-compute/personal-compute.go new file mode 100755 index 000000000..79090faf2 --- /dev/null +++ b/cmd/account/personal-compute/personal-compute.go @@ -0,0 +1,219 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package personal_compute + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "personal-compute", + Short: `The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources.`, + Long: `The Personal Compute enablement setting lets you control which users can use + the Personal Compute default policy to create compute resources. By default + all users in all workspaces have access (ON), but you can change the setting + to instead let individual workspaces configure access control (DELEGATE). + + There is only one instance of this setting per account. Since this setting has + a default value, this setting is present on all accounts even though it's + never set on a given account. Deletion reverts the value of the setting back + to the default value.`, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeletePersonalComputeRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeletePersonalComputeRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete Personal Compute setting.` + cmd.Long = `Delete Personal Compute setting. 
+ + Reverts back the Personal Compute setting value to default (ON)` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.Settings.PersonalCompute().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetPersonalComputeRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetPersonalComputeRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get Personal Compute setting.` + cmd.Long = `Get Personal Compute setting. + + Gets the value of the Personal Compute setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.Settings.PersonalCompute().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdatePersonalComputeSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdatePersonalComputeSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update Personal Compute setting.` + cmd.Long = `Update Personal Compute setting. 
+ + Updates the value of the Personal Compute setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := a.Settings.PersonalCompute().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service PersonalCompute diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 458ff827e..4641223c8 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -27,6 +27,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newReplace()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -133,12 +140,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -216,12 +217,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -299,12 +294,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -348,12 +337,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start replace command // Slice with functions to override default command behavior. @@ -460,10 +443,4 @@ func newReplace() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReplace()) - }) -} - // end service PrivateAccess diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index 54cf63371..8befd39ba 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -27,6 +27,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -103,12 +110,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -168,12 +169,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -232,12 +227,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -278,12 +267,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -353,10 +336,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service PublishedAppIntegration diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index 1a646e25c..0239df664 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -38,6 +38,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -107,12 +112,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -176,12 +175,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -242,10 +235,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service ServicePrincipalSecrets diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index af18d5341..b9ad194cf 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -32,6 +32,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -112,12 +120,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -188,12 +190,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -265,12 +261,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -329,12 +319,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -417,12 +401,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -513,10 +491,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountServicePrincipals diff --git a/cmd/account/settings/settings.go b/cmd/account/settings/settings.go index adeda73d9..a750e81e0 100755 --- a/cmd/account/settings/settings.go +++ b/cmd/account/settings/settings.go @@ -3,13 +3,11 @@ package settings import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/settings" "github.com/spf13/cobra" + + csp_enablement_account "github.com/databricks/cli/cmd/account/csp-enablement-account" + esm_enablement_account "github.com/databricks/cli/cmd/account/esm-enablement-account" + personal_compute "github.com/databricks/cli/cmd/account/personal-compute" ) // Slice with functions to override default command behavior. @@ -18,26 +16,20 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "settings", - Short: `The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources.`, - Long: `The Personal Compute enablement setting lets you control which users can use - the Personal Compute default policy to create compute resources. By default - all users in all workspaces have access (ON), but you can change the setting - to instead let individual workspaces configure access control (DELEGATE). - - There is only one instance of this setting per account. Since this setting has - a default value, this setting is present on all accounts even though it's - never set on a given account. 
Deletion reverts the value of the setting back - to the default value.`, + Use: "settings", + Short: `Accounts Settings API allows users to manage settings at the account level.`, + Long: `Accounts Settings API allows users to manage settings at the account level.`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. - Hidden: true, } + // Add subservices + cmd.AddCommand(csp_enablement_account.New()) + cmd.AddCommand(esm_enablement_account.New()) + cmd.AddCommand(personal_compute.New()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -46,191 +38,4 @@ func New() *cobra.Command { return cmd } -// start delete-personal-compute-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deletePersonalComputeSettingOverrides []func( - *cobra.Command, - *settings.DeletePersonalComputeSettingRequest, -) - -func newDeletePersonalComputeSetting() *cobra.Command { - cmd := &cobra.Command{} - - var deletePersonalComputeSettingReq settings.DeletePersonalComputeSettingRequest - - // TODO: short flags - - cmd.Flags().StringVar(&deletePersonalComputeSettingReq.Etag, "etag", deletePersonalComputeSettingReq.Etag, `etag used for versioning.`) - - cmd.Use = "delete-personal-compute-setting" - cmd.Short = `Delete Personal Compute setting.` - cmd.Long = `Delete Personal Compute setting. - - Reverts back the Personal Compute setting value to default (ON)` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - response, err := a.Settings.DeletePersonalComputeSetting(ctx, deletePersonalComputeSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deletePersonalComputeSettingOverrides { - fn(cmd, &deletePersonalComputeSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeletePersonalComputeSetting()) - }) -} - -// start get-personal-compute-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getPersonalComputeSettingOverrides []func( - *cobra.Command, - *settings.GetPersonalComputeSettingRequest, -) - -func newGetPersonalComputeSetting() *cobra.Command { - cmd := &cobra.Command{} - - var getPersonalComputeSettingReq settings.GetPersonalComputeSettingRequest - - // TODO: short flags - - cmd.Flags().StringVar(&getPersonalComputeSettingReq.Etag, "etag", getPersonalComputeSettingReq.Etag, `etag used for versioning.`) - - cmd.Use = "get-personal-compute-setting" - cmd.Short = `Get Personal Compute setting.` - cmd.Long = `Get Personal Compute setting. 
- - Gets the value of the Personal Compute setting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - response, err := a.Settings.GetPersonalComputeSetting(ctx, getPersonalComputeSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getPersonalComputeSettingOverrides { - fn(cmd, &getPersonalComputeSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPersonalComputeSetting()) - }) -} - -// start update-personal-compute-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var updatePersonalComputeSettingOverrides []func( - *cobra.Command, - *settings.UpdatePersonalComputeSettingRequest, -) - -func newUpdatePersonalComputeSetting() *cobra.Command { - cmd := &cobra.Command{} - - var updatePersonalComputeSettingReq settings.UpdatePersonalComputeSettingRequest - var updatePersonalComputeSettingJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updatePersonalComputeSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Use = "update-personal-compute-setting" - cmd.Short = `Update Personal Compute setting.` - cmd.Long = `Update Personal Compute setting. - - Updates the value of the Personal Compute setting.` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - if cmd.Flags().Changed("json") { - err = updatePersonalComputeSettingJson.Unmarshal(&updatePersonalComputeSettingReq) - if err != nil { - return err - } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") - } - - response, err := a.Settings.UpdatePersonalComputeSetting(ctx, updatePersonalComputeSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. 
- for _, fn := range updatePersonalComputeSettingOverrides { - fn(cmd, &updatePersonalComputeSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePersonalComputeSetting()) - }) -} - // end service AccountSettings diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index 35b865c7f..61f8521bc 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -25,6 +25,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -107,12 +114,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -176,12 +177,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -244,12 +239,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -309,12 +298,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -387,10 +370,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountStorageCredentials diff --git a/cmd/account/storage/storage.go b/cmd/account/storage/storage.go index d671355d6..50460ed0a 100755 --- a/cmd/account/storage/storage.go +++ b/cmd/account/storage/storage.go @@ -32,6 +32,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -108,12 +114,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -185,12 +185,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -261,12 +255,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -310,10 +298,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service Storage diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index f5b81f219..ab4bd95bb 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -37,6 +37,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -120,12 +128,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -197,12 +199,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -281,12 +277,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -345,12 +335,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -433,12 +417,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -530,10 +508,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountUsers diff --git a/cmd/account/vpc-endpoints/vpc-endpoints.go b/cmd/account/vpc-endpoints/vpc-endpoints.go index 6d80e7314..0c15ca9c6 100755 --- a/cmd/account/vpc-endpoints/vpc-endpoints.go +++ b/cmd/account/vpc-endpoints/vpc-endpoints.go @@ -27,6 +27,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -126,12 +132,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -210,12 +210,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. 
@@ -290,12 +284,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -343,10 +331,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service VpcEndpoints diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index ab82cd39f..20f885249 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -28,6 +28,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -103,12 +109,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -170,12 +170,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -235,12 +229,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -318,10 +306,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service WorkspaceAssignment diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 500a7b771..2cc0cb1a7 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -35,6 +35,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -166,12 +173,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -252,12 +253,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -344,12 +339,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -396,12 +385,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -429,7 +412,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.CredentialsId, "credentials-id", updateReq.CredentialsId, `ID of the workspace's credential configuration object.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) - cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, `The ID of the network connectivity configuration object, which is the parent resource of this private endpoint rule object.`) + cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, ``) cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`) cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`) @@ -464,7 +447,12 @@ func newUpdate() *cobra.Command { workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace. - Custom tags. Given you provide an empty - custom tags, the update would not be applied. + custom tags, the update would not be applied. - Network connectivity + configuration ID to add serverless stable IP support. You can add or update + the network connectivity configuration ID to ensure the workspace uses the + same set of stable IP CIDR blocks to access your resources. You cannot remove + a network connectivity configuration from the workspace once attached, you can + only switch to another one. After calling the PATCH operation to update the workspace configuration, make repeated GET requests with the workspace ID and check the workspace @@ -476,25 +464,22 @@ func newUpdate() *cobra.Command { ### Update a running workspace You can update a Databricks workspace configuration for running workspaces for some fields, but not all fields. For a running workspace, this request supports updating the following fields only: - - Credential configuration ID - - - Network configuration ID. Used only if you already use a customer-managed - VPC. You cannot convert a running workspace from a Databricks-managed VPC to a - customer-managed VPC. You can use a network configuration update in this API - for a failed or running workspace to add support for PrivateLink, although you - also need to add a private access settings object. - - - Key configuration ID for managed services (control plane storage, such as - notebook source and Databricks SQL queries). Databricks does not directly - encrypt the data with the customer-managed key (CMK). 
Databricks uses both the - CMK and the Databricks managed key (DMK) that is unique to your workspace to - encrypt the Data Encryption Key (DEK). Databricks uses the DEK to encrypt your - workspace's managed services persisted data. If the workspace does not already - have a CMK for managed services, adding this ID enables managed services - encryption for new or updated data. Existing managed services data that - existed before adding the key remains not encrypted with the DEK until it is - modified. If the workspace already has customer-managed keys for managed - services, this request rotates (changes) the CMK keys and the DEK is + - Credential configuration ID - Network configuration ID. Used only if you + already use a customer-managed VPC. You cannot convert a running workspace + from a Databricks-managed VPC to a customer-managed VPC. You can use a network + configuration update in this API for a failed or running workspace to add + support for PrivateLink, although you also need to add a private access + settings object. - Key configuration ID for managed services (control plane + storage, such as notebook source and Databricks SQL queries). Databricks does + not directly encrypt the data with the customer-managed key (CMK). Databricks + uses both the CMK and the Databricks managed key (DMK) that is unique to your + workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to + encrypt your workspace's managed services persisted data. If the workspace + does not already have a CMK for managed services, adding this ID enables + managed services encryption for new or updated data. Existing managed services + data that existed before adding the key remains not encrypted with the DEK + until it is modified. If the workspace already has customer-managed keys for + managed services, this request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK. - Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not already have a customer-managed key @@ -503,7 +488,12 @@ func newUpdate() *cobra.Command { upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace. - Custom tags. Given you provide an empty - custom tags, the update would not be applied. + custom tags, the update would not be applied. - Network connectivity + configuration ID to add serverless stable IP support. You can add or update + the network connectivity configuration ID to ensure the workspace uses the + same set of stable IP CIDR blocks to access your resources. You cannot remove + a network connectivity configuration from the workspace once attached, you can + only switch to another one. **Important**: To update a running workspace, your workspace must have no running compute resources that run in your workspace's VPC in the Classic data @@ -523,11 +513,9 @@ func newUpdate() *cobra.Command { This results in a total of up to 40 minutes in which you cannot create clusters. If you create or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could cause other unexpected - behavior. - - * For workspaces with a customer-managed VPC, the workspace status stays at - status RUNNING and the VPC change happens immediately. A change to the - storage customer-managed key configuration ID might take a few minutes to + behavior. 
* For workspaces with a customer-managed VPC, the workspace status + stays at status RUNNING and the VPC change happens immediately. A change to + the storage customer-managed key configuration ID might take a few minutes to update, so continue to check the workspace until you observe that it has been updated. If the update fails, the workspace might revert silently to its original configuration. After the workspace has been updated, you cannot use @@ -621,10 +609,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Workspaces diff --git a/cmd/workspace/alerts/alerts.go b/cmd/workspace/alerts/alerts.go index 773a34876..695fa6a94 100755 --- a/cmd/workspace/alerts/alerts.go +++ b/cmd/workspace/alerts/alerts.go @@ -31,6 +31,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -103,12 +110,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -178,12 +179,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -251,12 +246,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -299,12 +288,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -372,10 +355,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Alerts diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index f0bd6acf8..691584db7 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -32,6 +32,14 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDeleteApp()) + cmd.AddCommand(newGetApp()) + cmd.AddCommand(newGetAppDeploymentStatus()) + cmd.AddCommand(newGetApps()) + cmd.AddCommand(newGetEvents()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -58,7 +66,7 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: output-only field + // TODO: any: resources cmd.Use = "create" cmd.Short = `Create and deploy an application.` @@ -101,12 +109,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete-app command // Slice with functions to override default command behavior. @@ -165,12 +167,6 @@ func newDeleteApp() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteApp()) - }) -} - // start get-app command // Slice with functions to override default command behavior. @@ -229,12 +225,6 @@ func newGetApp() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetApp()) - }) -} - // start get-app-deployment-status command // Slice with functions to override default command behavior. @@ -295,12 +285,6 @@ func newGetAppDeploymentStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetAppDeploymentStatus()) - }) -} - // start get-apps command // Slice with functions to override default command behavior. @@ -343,12 +327,6 @@ func newGetApps() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetApps()) - }) -} - // start get-events command // Slice with functions to override default command behavior. @@ -407,10 +385,4 @@ func newGetEvents() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetEvents()) - }) -} - // end service Apps diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go index e0b36ff92..329ca9c3d 100755 --- a/cmd/workspace/artifact-allowlists/artifact-allowlists.go +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -29,6 +29,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -99,12 +103,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -178,10 +176,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service ArtifactAllowlists diff --git a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go new file mode 100755 index 000000000..4c6e643de --- /dev/null +++ b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go @@ -0,0 +1,157 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package automatic_cluster_update + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "automatic-cluster-update", + Short: `Controls whether automatic cluster update is enabled for the current workspace.`, + Long: `Controls whether automatic cluster update is enabled for the current + workspace. By default, it is turned off.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetAutomaticClusterUpdateRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetAutomaticClusterUpdateRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the automatic cluster update setting.` + cmd.Long = `Get the automatic cluster update setting. + + Gets the automatic cluster update setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AutomaticClusterUpdate().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateAutomaticClusterUpdateSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateAutomaticClusterUpdateSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the automatic cluster update setting.` + cmd.Long = `Update the automatic cluster update setting. + + Updates the automatic cluster update setting for the workspace. A fresh etag + needs to be provided in PATCH requests (as part of the setting field). The + etag can be retrieved by making a GET request before the PATCH request. 
If + the setting is updated concurrently, PATCH fails with 409 and the request + must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.AutomaticClusterUpdate().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AutomaticClusterUpdate diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 8e639023f..b08769420 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -34,6 +34,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -126,12 +133,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -193,12 +194,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -259,12 +254,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -308,12 +297,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -389,10 +372,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Catalogs diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 4cee2ce6c..33facfb95 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -35,6 +35,13 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -105,12 +112,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -170,12 +171,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -237,12 +232,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -298,12 +287,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -387,10 +370,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service CleanRooms diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index f6edee2b3..15a75b1f7 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -49,6 +49,17 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -140,12 +151,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -237,12 +242,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -331,12 +330,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -407,12 +400,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -483,12 +470,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. 
@@ -560,12 +541,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -619,12 +594,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -706,12 +675,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -793,10 +756,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service ClusterPolicies diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index cf35b2837..8d7737552 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -54,6 +54,28 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newChangeOwner()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newEvents()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListNodeTypes()) + cmd.AddCommand(newListZones()) + cmd.AddCommand(newPermanentDelete()) + cmd.AddCommand(newPin()) + cmd.AddCommand(newResize()) + cmd.AddCommand(newRestart()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newSparkVersions()) + cmd.AddCommand(newStart()) + cmd.AddCommand(newUnpin()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -143,12 +165,6 @@ func newChangeOwner() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newChangeOwner()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -293,12 +309,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -409,12 +419,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -567,12 +571,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start events command // Slice with functions to override default command behavior. @@ -669,12 +667,6 @@ func newEvents() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEvents()) - }) -} - // start get command // Slice with functions to override default command behavior. 
@@ -751,12 +743,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -827,12 +813,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -904,12 +884,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -970,12 +944,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start list-node-types command // Slice with functions to override default command behavior. @@ -1019,12 +987,6 @@ func newListNodeTypes() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListNodeTypes()) - }) -} - // start list-zones command // Slice with functions to override default command behavior. @@ -1068,12 +1030,6 @@ func newListZones() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListZones()) - }) -} - // start permanent-delete command // Slice with functions to override default command behavior. @@ -1169,12 +1125,6 @@ func newPermanentDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPermanentDelete()) - }) -} - // start pin command // Slice with functions to override default command behavior. @@ -1267,12 +1217,6 @@ func newPin() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPin()) - }) -} - // start resize command // Slice with functions to override default command behavior. @@ -1384,12 +1328,6 @@ func newResize() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newResize()) - }) -} - // start restart command // Slice with functions to override default command behavior. @@ -1500,12 +1438,6 @@ func newRestart() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestart()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -1587,12 +1519,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start spark-versions command // Slice with functions to override default command behavior. @@ -1636,12 +1562,6 @@ func newSparkVersions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSparkVersions()) - }) -} - // start start command // Slice with functions to override default command behavior. 
@@ -1756,12 +1676,6 @@ func newStart() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStart()) - }) -} - // start unpin command // Slice with functions to override default command behavior. @@ -1854,12 +1768,6 @@ func newUnpin() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUnpin()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -1941,10 +1849,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Clusters diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index e365be7d1..8b0022dcc 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -34,6 +34,7 @@ import ( model_registry "github.com/databricks/cli/cmd/workspace/model-registry" model_versions "github.com/databricks/cli/cmd/workspace/model-versions" online_tables "github.com/databricks/cli/cmd/workspace/online-tables" + permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration" permissions "github.com/databricks/cli/cmd/workspace/permissions" pipelines "github.com/databricks/cli/cmd/workspace/pipelines" policy_families "github.com/databricks/cli/cmd/workspace/policy-families" @@ -102,6 +103,7 @@ func All() []*cobra.Command { out = append(out, model_registry.New()) out = append(out, model_versions.New()) out = append(out, online_tables.New()) + out = append(out, permission_migration.New()) out = append(out, permissions.New()) out = append(out, pipelines.New()) out = append(out, policy_families.New()) diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index f740c7789..87ec52beb 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -37,6 +37,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -112,12 +119,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -188,12 +189,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -264,12 +259,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -309,12 +298,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -386,10 +369,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Connections diff --git a/cmd/workspace/credentials-manager/credentials-manager.go b/cmd/workspace/credentials-manager/credentials-manager.go index 132ba51ee..5a40232b3 100755 --- a/cmd/workspace/credentials-manager/credentials-manager.go +++ b/cmd/workspace/credentials-manager/credentials-manager.go @@ -31,6 +31,9 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newExchangeToken()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -99,10 +102,4 @@ func newExchangeToken() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExchangeToken()) - }) -} - // end service CredentialsManager diff --git a/cmd/workspace/csp-enablement/csp-enablement.go b/cmd/workspace/csp-enablement/csp-enablement.go new file mode 100755 index 000000000..5e037f2ab --- /dev/null +++ b/cmd/workspace/csp-enablement/csp-enablement.go @@ -0,0 +1,160 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package csp_enablement + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "csp-enablement", + Short: `Controls whether to enable the compliance security profile for the current workspace.`, + Long: `Controls whether to enable the compliance security profile for the current + workspace. Enabling it on a workspace is permanent. By default, it is turned + off. + + This settings can NOT be disabled once it is enabled.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetCspEnablementRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetCspEnablementRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the compliance security profile setting.` + cmd.Long = `Get the compliance security profile setting. 
+ + Gets the compliance security profile setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.CspEnablement().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateCspEnablementSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateCspEnablementSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the compliance security profile setting.` + cmd.Long = `Update the compliance security profile setting. + + Updates the compliance security profile setting for the workspace. A fresh + etag needs to be provided in PATCH requests (as part of the setting field). + The etag can be retrieved by making a GET request before the PATCH + request. If the setting is updated concurrently, PATCH fails with 409 and + the request must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.CspEnablement().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CSPEnablement diff --git a/cmd/workspace/current-user/current-user.go b/cmd/workspace/current-user/current-user.go index cb18e71d2..a42c3ead5 100755 --- a/cmd/workspace/current-user/current-user.go +++ b/cmd/workspace/current-user/current-user.go @@ -24,6 +24,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newMe()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -74,10 +77,4 @@ func newMe() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newMe()) - }) -} - // end service CurrentUser diff --git a/cmd/workspace/dashboard-widgets/dashboard-widgets.go b/cmd/workspace/dashboard-widgets/dashboard-widgets.go index 43a972e03..90463dd00 100755 --- a/cmd/workspace/dashboard-widgets/dashboard-widgets.go +++ b/cmd/workspace/dashboard-widgets/dashboard-widgets.go @@ -32,6 +32,11 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -97,12 +102,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -159,12 +158,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -231,10 +224,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service DashboardWidgets diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index e07f73926..3020cb606 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -32,6 +32,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRestore()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -97,12 +105,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -171,12 +173,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -245,12 +241,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -309,12 +299,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start restore command // Slice with functions to override default command behavior. @@ -382,12 +366,6 @@ func newRestore() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestore()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -469,10 +447,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Dashboards diff --git a/cmd/workspace/data-sources/data-sources.go b/cmd/workspace/data-sources/data-sources.go index 969399f42..0f0f8541e 100755 --- a/cmd/workspace/data-sources/data-sources.go +++ b/cmd/workspace/data-sources/data-sources.go @@ -32,6 +32,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -84,10 +87,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service DataSources diff --git a/cmd/workspace/default-namespace/default-namespace.go b/cmd/workspace/default-namespace/default-namespace.go new file mode 100755 index 000000000..38880dd57 --- /dev/null +++ b/cmd/workspace/default-namespace/default-namespace.go @@ -0,0 +1,229 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package default_namespace + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "default-namespace", + Short: `The default namespace setting API allows users to configure the default namespace for a Databricks workspace.`, + Long: `The default namespace setting API allows users to configure the default + namespace for a Databricks workspace. + + Through this API, users can retrieve, set, or modify the default namespace + used when queries do not reference a fully qualified three-level name. For + example, if you use the API to set 'retail_prod' as the default catalog, then + a query 'SELECT * FROM myTable' would reference the object + 'retail_prod.default.myTable' (the schema 'default' is always assumed). + + This setting requires a restart of clusters and SQL warehouses to take effect. + Additionally, the default namespace only applies when using Unity + Catalog-enabled compute.`, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteDefaultNamespaceRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteDefaultNamespaceRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the default namespace setting.` + cmd.Long = `Delete the default namespace setting. + + Deletes the default namespace setting for the workspace. A fresh etag needs to + be provided in DELETE requests (as a query parameter). 
The etag can be + retrieved by making a GET request before the DELETE request. If the + setting is updated/deleted concurrently, DELETE fails with 409 and the + request must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DefaultNamespace().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetDefaultNamespaceRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetDefaultNamespaceRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the default namespace setting.` + cmd.Long = `Get the default namespace setting. + + Gets the default namespace setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DefaultNamespace().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateDefaultNamespaceSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateDefaultNamespaceSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the default namespace setting.` + cmd.Long = `Update the default namespace setting. + + Updates the default namespace setting for the workspace. A fresh etag needs to + be provided in PATCH requests (as part of the setting field). The etag can + be retrieved by making a GET request before the PATCH request. 
Note that + if the setting does not exist, GET returns a NOT_FOUND error and the etag is + present in the error response, which should be set in the PATCH request. If + the setting is updated concurrently, PATCH fails with 409 and the request + must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.DefaultNamespace().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service DefaultNamespace diff --git a/cmd/workspace/esm-enablement/esm-enablement.go b/cmd/workspace/esm-enablement/esm-enablement.go new file mode 100755 index 000000000..a3da246fe --- /dev/null +++ b/cmd/workspace/esm-enablement/esm-enablement.go @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package esm_enablement + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "esm-enablement", + Short: `Controls whether enhanced security monitoring is enabled for the current workspace.`, + Long: `Controls whether enhanced security monitoring is enabled for the current + workspace. If the compliance security profile is enabled, this is + automatically enabled. By default, it is disabled. However, if the compliance + security profile is enabled, this is automatically enabled. + + If the compliance security profile is disabled, you can enable or disable this + setting and it is not permanent.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetEsmEnablementRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetEsmEnablementRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the enhanced security monitoring setting.` + cmd.Long = `Get the enhanced security monitoring setting. 
+ + Gets the enhanced security monitoring setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.EsmEnablement().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateEsmEnablementSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateEsmEnablementSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the enhanced security monitoring setting.` + cmd.Long = `Update the enhanced security monitoring setting. + + Updates the enhanced security monitoring setting for the workspace. A fresh + etag needs to be provided in PATCH requests (as part of the setting field). + The etag can be retrieved by making a GET request before the PATCH + request. If the setting is updated concurrently, PATCH fails with 409 and + the request must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.EsmEnablement().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
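	// Per-command override slices such as updateOverrides follow the same pattern as
	// cmdOverrides, but each hook also receives a pointer to the request struct, so a
	// manually curated file can adjust flags, help text, or request defaults before
	// the command runs. A minimal, hypothetical sketch (not part of the generated code):
	//
	//	func init() {
	//		updateOverrides = append(updateOverrides, func(cmd *cobra.Command, req *settings.UpdateEsmEnablementSettingRequest) {
	//			cmd.Use = "update" // e.g. tweak usage text; request fields could be pre-set here as well
	//		})
	//	}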
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ESMEnablement diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 368ec7f94..50337390a 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -35,6 +35,38 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateExperiment()) + cmd.AddCommand(newCreateRun()) + cmd.AddCommand(newDeleteExperiment()) + cmd.AddCommand(newDeleteRun()) + cmd.AddCommand(newDeleteRuns()) + cmd.AddCommand(newDeleteTag()) + cmd.AddCommand(newGetByName()) + cmd.AddCommand(newGetExperiment()) + cmd.AddCommand(newGetHistory()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetRun()) + cmd.AddCommand(newListArtifacts()) + cmd.AddCommand(newListExperiments()) + cmd.AddCommand(newLogBatch()) + cmd.AddCommand(newLogInputs()) + cmd.AddCommand(newLogMetric()) + cmd.AddCommand(newLogModel()) + cmd.AddCommand(newLogParam()) + cmd.AddCommand(newRestoreExperiment()) + cmd.AddCommand(newRestoreRun()) + cmd.AddCommand(newRestoreRuns()) + cmd.AddCommand(newSearchExperiments()) + cmd.AddCommand(newSearchRuns()) + cmd.AddCommand(newSetExperimentTag()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newSetTag()) + cmd.AddCommand(newUpdateExperiment()) + cmd.AddCommand(newUpdatePermissions()) + cmd.AddCommand(newUpdateRun()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -126,12 +158,6 @@ func newCreateExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateExperiment()) - }) -} - // start create-run command // Slice with functions to override default command behavior. @@ -202,12 +228,6 @@ func newCreateRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateRun()) - }) -} - // start delete-experiment command // Slice with functions to override default command behavior. @@ -285,12 +305,6 @@ func newDeleteExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteExperiment()) - }) -} - // start delete-run command // Slice with functions to override default command behavior. @@ -366,12 +380,6 @@ func newDeleteRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteRun()) - }) -} - // start delete-runs command // Slice with functions to override default command behavior. @@ -461,12 +469,6 @@ func newDeleteRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteRuns()) - }) -} - // start delete-tag command // Slice with functions to override default command behavior. @@ -547,12 +549,6 @@ func newDeleteTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteTag()) - }) -} - // start get-by-name command // Slice with functions to override default command behavior. 
@@ -619,12 +615,6 @@ func newGetByName() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetByName()) - }) -} - // start get-experiment command // Slice with functions to override default command behavior. @@ -683,12 +673,6 @@ func newGetExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetExperiment()) - }) -} - // start get-history command // Slice with functions to override default command behavior. @@ -749,12 +733,6 @@ func newGetHistory() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetHistory()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -813,12 +791,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -878,12 +850,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-run command // Slice with functions to override default command behavior. @@ -949,12 +915,6 @@ func newGetRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRun()) - }) -} - // start list-artifacts command // Slice with functions to override default command behavior. @@ -1011,12 +971,6 @@ func newListArtifacts() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListArtifacts()) - }) -} - // start list-experiments command // Slice with functions to override default command behavior. @@ -1071,12 +1025,6 @@ func newListExperiments() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListExperiments()) - }) -} - // start log-batch command // Slice with functions to override default command behavior. @@ -1180,12 +1128,6 @@ func newLogBatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogBatch()) - }) -} - // start log-inputs command // Slice with functions to override default command behavior. @@ -1252,12 +1194,6 @@ func newLogInputs() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogInputs()) - }) -} - // start log-metric command // Slice with functions to override default command behavior. @@ -1353,12 +1289,6 @@ func newLogMetric() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogMetric()) - }) -} - // start log-model command // Slice with functions to override default command behavior. @@ -1425,12 +1355,6 @@ func newLogModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogModel()) - }) -} - // start log-param command // Slice with functions to override default command behavior. 
@@ -1516,12 +1440,6 @@ func newLogParam() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogParam()) - }) -} - // start restore-experiment command // Slice with functions to override default command behavior. @@ -1602,12 +1520,6 @@ func newRestoreExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestoreExperiment()) - }) -} - // start restore-run command // Slice with functions to override default command behavior. @@ -1683,12 +1595,6 @@ func newRestoreRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestoreRun()) - }) -} - // start restore-runs command // Slice with functions to override default command behavior. @@ -1778,12 +1684,6 @@ func newRestoreRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestoreRuns()) - }) -} - // start search-experiments command // Slice with functions to override default command behavior. @@ -1849,12 +1749,6 @@ func newSearchExperiments() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSearchExperiments()) - }) -} - // start search-runs command // Slice with functions to override default command behavior. @@ -1923,12 +1817,6 @@ func newSearchRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSearchRuns()) - }) -} - // start set-experiment-tag command // Slice with functions to override default command behavior. @@ -2015,12 +1903,6 @@ func newSetExperimentTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetExperimentTag()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -2090,12 +1972,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start set-tag command // Slice with functions to override default command behavior. @@ -2182,12 +2058,6 @@ func newSetTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetTag()) - }) -} - // start update-experiment command // Slice with functions to override default command behavior. @@ -2265,12 +2135,6 @@ func newUpdateExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateExperiment()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -2340,12 +2204,6 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // start update-run command // Slice with functions to override default command behavior. 
@@ -2413,10 +2271,4 @@ func newUpdateRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateRun()) - }) -} - // end service Experiments diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 7ddc0d842..76e460050 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -39,6 +39,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -138,12 +145,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -205,12 +206,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -271,12 +266,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -335,12 +324,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -420,10 +403,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service ExternalLocations diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index d1db1ec97..5b1b90241 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -32,6 +32,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -103,12 +110,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -187,12 +188,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -270,12 +265,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -343,12 +332,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -437,10 +420,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Functions diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index 8984a9538..ca8a1c274 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -32,6 +32,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -123,12 +130,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -202,12 +203,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -281,12 +276,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -327,12 +316,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -418,10 +401,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service GitCredentials diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index de08614fe..0461b4514 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -35,6 +35,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -125,12 +132,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -201,12 +202,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. 
@@ -277,12 +272,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -325,12 +314,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -416,10 +399,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service GlobalInitScripts diff --git a/cmd/workspace/grants/grants.go b/cmd/workspace/grants/grants.go index 020e0bf8b..851c3cfbe 100755 --- a/cmd/workspace/grants/grants.go +++ b/cmd/workspace/grants/grants.go @@ -37,6 +37,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetEffective()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -110,12 +115,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-effective command // Slice with functions to override default command behavior. @@ -181,12 +180,6 @@ func newGetEffective() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetEffective()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -260,10 +253,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Grants diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index aba54b8be..2fc632201 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -33,6 +33,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -114,12 +122,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -190,12 +192,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -266,12 +262,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -330,12 +320,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -417,12 +401,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -511,10 +489,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Groups diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index c9389fef8..8000365b0 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -44,6 +44,17 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -148,12 +159,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -245,12 +250,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -344,12 +343,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -420,12 +413,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -496,12 +483,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -573,12 +554,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -618,12 +593,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. 
@@ -705,12 +674,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -792,10 +755,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service InstancePools diff --git a/cmd/workspace/instance-profiles/instance-profiles.go b/cmd/workspace/instance-profiles/instance-profiles.go index 2077c4bfc..919ec511d 100755 --- a/cmd/workspace/instance-profiles/instance-profiles.go +++ b/cmd/workspace/instance-profiles/instance-profiles.go @@ -32,6 +32,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newAdd()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRemove()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -121,12 +127,6 @@ func newAdd() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newAdd()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -220,12 +220,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -267,12 +261,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start remove command // Slice with functions to override default command behavior. @@ -351,10 +339,4 @@ func newRemove() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRemove()) - }) -} - // end service InstanceProfiles diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index 9eb08cb43..2b6ddfa23 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -47,6 +47,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newReplace()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -159,12 +167,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -235,12 +237,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -311,12 +307,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -356,12 +346,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start replace command // Slice with functions to override default command behavior. @@ -471,12 +455,6 @@ func newReplace() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReplace()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -575,10 +553,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service IpAccessLists diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 957aa6093..8a98e1c85 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -45,6 +45,28 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCancelAllRuns()) + cmd.AddCommand(newCancelRun()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteRun()) + cmd.AddCommand(newExportRun()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetRun()) + cmd.AddCommand(newGetRunOutput()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListRuns()) + cmd.AddCommand(newRepairRun()) + cmd.AddCommand(newReset()) + cmd.AddCommand(newRunNow()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newSubmit()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -119,12 +141,6 @@ func newCancelAllRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCancelAllRuns()) - }) -} - // start cancel-run command // Slice with functions to override default command behavior. @@ -243,12 +259,6 @@ func newCancelRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCancelRun()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -308,12 +318,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -407,12 +411,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start delete-run command // Slice with functions to override default command behavior. @@ -506,12 +504,6 @@ func newDeleteRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteRun()) - }) -} - // start export-run command // Slice with functions to override default command behavior. @@ -587,12 +579,6 @@ func newExportRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExportRun()) - }) -} - // start get command // Slice with functions to override default command behavior. 
@@ -667,12 +653,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -743,12 +723,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -820,12 +794,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-run command // Slice with functions to override default command behavior. @@ -908,12 +876,6 @@ func newGetRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRun()) - }) -} - // start get-run-output command // Slice with functions to override default command behavior. @@ -996,12 +958,6 @@ func newGetRunOutput() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRunOutput()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -1058,12 +1014,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start list-runs command // Slice with functions to override default command behavior. @@ -1125,12 +1075,6 @@ func newListRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListRuns()) - }) -} - // start repair-run command // Slice with functions to override default command behavior. @@ -1264,12 +1208,6 @@ func newRepairRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRepairRun()) - }) -} - // start reset command // Slice with functions to override default command behavior. @@ -1330,12 +1268,6 @@ func newReset() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReset()) - }) -} - // start run-now command // Slice with functions to override default command behavior. @@ -1465,12 +1397,6 @@ func newRunNow() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRunNow()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -1552,12 +1478,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start submit command // Slice with functions to override default command behavior. @@ -1659,12 +1579,6 @@ func newSubmit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSubmit()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -1762,12 +1676,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -1849,10 +1757,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Jobs diff --git a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go index 13383f36f..9559d036d 100755 --- a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go +++ b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go @@ -34,6 +34,16 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCancelRefresh()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetRefresh()) + cmd.AddCommand(newListRefreshes()) + cmd.AddCommand(newRunRefresh()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -114,12 +124,6 @@ func newCancelRefresh() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCancelRefresh()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -146,7 +150,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: schedule cmd.Flags().BoolVar(&createReq.SkipBuiltinDashboard, "skip-builtin-dashboard", createReq.SkipBuiltinDashboard, `Whether to skip creating a default dashboard summarizing data quality metrics.`) // TODO: array: slicing_exprs - // TODO: output-only field + // TODO: complex arg: snapshot // TODO: complex arg: time_series cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `Optional argument to specify the warehouse for dashboard creation.`) @@ -223,12 +227,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -299,12 +297,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -374,12 +366,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-refresh command // Slice with functions to override default command behavior. @@ -449,12 +435,6 @@ func newGetRefresh() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRefresh()) - }) -} - // start list-refreshes command // Slice with functions to override default command behavior. @@ -523,12 +503,6 @@ func newListRefreshes() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListRefreshes()) - }) -} - // start run-refresh command // Slice with functions to override default command behavior. 
@@ -597,12 +571,6 @@ func newRunRefresh() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRunRefresh()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -628,7 +596,7 @@ func newUpdate() *cobra.Command { // TODO: array: notifications // TODO: complex arg: schedule // TODO: array: slicing_exprs - // TODO: output-only field + // TODO: complex arg: snapshot // TODO: complex arg: time_series cmd.Use = "update FULL_NAME OUTPUT_SCHEMA_NAME" @@ -702,10 +670,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service LakehouseMonitors diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index a6dddd0de..a81483997 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -26,6 +26,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newPublish()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -103,10 +106,4 @@ func newPublish() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPublish()) - }) -} - // end service Lakeview diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index fef81c25f..d6761a821 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -46,6 +46,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newAllClusterStatuses()) + cmd.AddCommand(newClusterStatus()) + cmd.AddCommand(newInstall()) + cmd.AddCommand(newUninstall()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -98,12 +104,6 @@ func newAllClusterStatuses() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newAllClusterStatuses()) - }) -} - // start cluster-status command // Slice with functions to override default command behavior. @@ -173,12 +173,6 @@ func newClusterStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newClusterStatus()) - }) -} - // start install command // Slice with functions to override default command behavior. @@ -243,12 +237,6 @@ func newInstall() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newInstall()) - }) -} - // start uninstall command // Slice with functions to override default command behavior. 
@@ -310,10 +298,4 @@ func newUninstall() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUninstall()) - }) -} - // end service Libraries diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index d63576d4e..97e77a479 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -39,6 +39,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newAssign()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newCurrent()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSummary()) + cmd.AddCommand(newUnassign()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdateAssignment()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -133,12 +145,6 @@ func newAssign() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newAssign()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -221,12 +227,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start current command // Slice with functions to override default command behavior. @@ -269,12 +269,6 @@ func newCurrent() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCurrent()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -347,12 +341,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -424,12 +412,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -471,12 +453,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start summary command // Slice with functions to override default command behavior. @@ -520,12 +496,6 @@ func newSummary() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSummary()) - }) -} - // start unassign command // Slice with functions to override default command behavior. @@ -589,12 +559,6 @@ func newUnassign() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUnassign()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -683,12 +647,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-assignment command // Slice with functions to override default command behavior. 
@@ -776,10 +734,4 @@ func newUpdateAssignment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateAssignment()) - }) -} - // end service Metastores diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 9c6034b56..74e5e66e3 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -34,6 +34,44 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newApproveTransitionRequest()) + cmd.AddCommand(newCreateComment()) + cmd.AddCommand(newCreateModel()) + cmd.AddCommand(newCreateModelVersion()) + cmd.AddCommand(newCreateTransitionRequest()) + cmd.AddCommand(newCreateWebhook()) + cmd.AddCommand(newDeleteComment()) + cmd.AddCommand(newDeleteModel()) + cmd.AddCommand(newDeleteModelTag()) + cmd.AddCommand(newDeleteModelVersion()) + cmd.AddCommand(newDeleteModelVersionTag()) + cmd.AddCommand(newDeleteTransitionRequest()) + cmd.AddCommand(newDeleteWebhook()) + cmd.AddCommand(newGetLatestVersions()) + cmd.AddCommand(newGetModel()) + cmd.AddCommand(newGetModelVersion()) + cmd.AddCommand(newGetModelVersionDownloadUri()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newListModels()) + cmd.AddCommand(newListTransitionRequests()) + cmd.AddCommand(newListWebhooks()) + cmd.AddCommand(newRejectTransitionRequest()) + cmd.AddCommand(newRenameModel()) + cmd.AddCommand(newSearchModelVersions()) + cmd.AddCommand(newSearchModels()) + cmd.AddCommand(newSetModelTag()) + cmd.AddCommand(newSetModelVersionTag()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newTestRegistryWebhook()) + cmd.AddCommand(newTransitionStage()) + cmd.AddCommand(newUpdateComment()) + cmd.AddCommand(newUpdateModel()) + cmd.AddCommand(newUpdateModelVersion()) + cmd.AddCommand(newUpdatePermissions()) + cmd.AddCommand(newUpdateWebhook()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -146,12 +184,6 @@ func newApproveTransitionRequest() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newApproveTransitionRequest()) - }) -} - // start create-comment command // Slice with functions to override default command behavior. @@ -237,12 +269,6 @@ func newCreateComment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateComment()) - }) -} - // start create-model command // Slice with functions to override default command behavior. @@ -324,12 +350,6 @@ func newCreateModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateModel()) - }) -} - // start create-model-version command // Slice with functions to override default command behavior. @@ -414,12 +434,6 @@ func newCreateModelVersion() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateModelVersion()) - }) -} - // start create-transition-request command // Slice with functions to override default command behavior. 
@@ -516,12 +530,6 @@ func newCreateTransitionRequest() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateTransitionRequest()) - }) -} - // start create-webhook command // Slice with functions to override default command behavior. @@ -589,12 +597,6 @@ func newCreateWebhook() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateWebhook()) - }) -} - // start delete-comment command // Slice with functions to override default command behavior. @@ -650,12 +652,6 @@ func newDeleteComment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteComment()) - }) -} - // start delete-model command // Slice with functions to override default command behavior. @@ -714,12 +710,6 @@ func newDeleteModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteModel()) - }) -} - // start delete-model-tag command // Slice with functions to override default command behavior. @@ -781,12 +771,6 @@ func newDeleteModelTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteModelTag()) - }) -} - // start delete-model-version command // Slice with functions to override default command behavior. @@ -847,12 +831,6 @@ func newDeleteModelVersion() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteModelVersion()) - }) -} - // start delete-model-version-tag command // Slice with functions to override default command behavior. @@ -916,12 +894,6 @@ func newDeleteModelVersionTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteModelVersionTag()) - }) -} - // start delete-transition-request command // Slice with functions to override default command behavior. @@ -1001,12 +973,6 @@ func newDeleteTransitionRequest() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteTransitionRequest()) - }) -} - // start delete-webhook command // Slice with functions to override default command behavior. @@ -1064,12 +1030,6 @@ func newDeleteWebhook() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteWebhook()) - }) -} - // start get-latest-versions command // Slice with functions to override default command behavior. @@ -1144,12 +1104,6 @@ func newGetLatestVersions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetLatestVersions()) - }) -} - // start get-model command // Slice with functions to override default command behavior. @@ -1212,12 +1166,6 @@ func newGetModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetModel()) - }) -} - // start get-model-version command // Slice with functions to override default command behavior. 
@@ -1278,12 +1226,6 @@ func newGetModelVersion() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetModelVersion()) - }) -} - // start get-model-version-download-uri command // Slice with functions to override default command behavior. @@ -1344,12 +1286,6 @@ func newGetModelVersionDownloadUri() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetModelVersionDownloadUri()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -1408,12 +1344,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -1473,12 +1403,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list-models command // Slice with functions to override default command behavior. @@ -1533,12 +1457,6 @@ func newListModels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListModels()) - }) -} - // start list-transition-requests command // Slice with functions to override default command behavior. @@ -1596,12 +1514,6 @@ func newListTransitionRequests() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListTransitionRequests()) - }) -} - // start list-webhooks command // Slice with functions to override default command behavior. @@ -1658,12 +1570,6 @@ func newListWebhooks() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListWebhooks()) - }) -} - // start reject-transition-request command // Slice with functions to override default command behavior. @@ -1760,12 +1666,6 @@ func newRejectTransitionRequest() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRejectTransitionRequest()) - }) -} - // start rename-model command // Slice with functions to override default command behavior. @@ -1843,12 +1743,6 @@ func newRenameModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRenameModel()) - }) -} - // start search-model-versions command // Slice with functions to override default command behavior. @@ -1904,12 +1798,6 @@ func newSearchModelVersions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSearchModelVersions()) - }) -} - // start search-models command // Slice with functions to override default command behavior. @@ -1965,12 +1853,6 @@ func newSearchModels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSearchModels()) - }) -} - // start set-model-tag command // Slice with functions to override default command behavior. 
@@ -2059,12 +1941,6 @@ func newSetModelTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetModelTag()) - }) -} - // start set-model-version-tag command // Slice with functions to override default command behavior. @@ -2157,12 +2033,6 @@ func newSetModelVersionTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetModelVersionTag()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -2232,12 +2102,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start test-registry-webhook command // Slice with functions to override default command behavior. @@ -2330,12 +2194,6 @@ func newTestRegistryWebhook() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newTestRegistryWebhook()) - }) -} - // start transition-stage command // Slice with functions to override default command behavior. @@ -2444,12 +2302,6 @@ func newTransitionStage() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newTransitionStage()) - }) -} - // start update-comment command // Slice with functions to override default command behavior. @@ -2529,12 +2381,6 @@ func newUpdateComment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateComment()) - }) -} - // start update-model command // Slice with functions to override default command behavior. @@ -2612,12 +2458,6 @@ func newUpdateModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateModel()) - }) -} - // start update-model-version command // Slice with functions to override default command behavior. @@ -2699,12 +2539,6 @@ func newUpdateModelVersion() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateModelVersion()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -2774,12 +2608,6 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // start update-webhook command // Slice with functions to override default command behavior. @@ -2863,10 +2691,4 @@ func newUpdateWebhook() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateWebhook()) - }) -} - // end service ModelRegistry diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index b4492cb36..b322e8807 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -33,6 +33,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetByAlias()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -110,12 +117,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -184,12 +185,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-by-alias command // Slice with functions to override default command behavior. @@ -255,12 +250,6 @@ func newGetByAlias() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetByAlias()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -331,12 +320,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -417,10 +400,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service ModelVersions diff --git a/cmd/workspace/online-tables/online-tables.go b/cmd/workspace/online-tables/online-tables.go index d97c52837..2a5574da9 100755 --- a/cmd/workspace/online-tables/online-tables.go +++ b/cmd/workspace/online-tables/online-tables.go @@ -26,6 +26,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -99,12 +104,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -165,12 +164,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -229,10 +222,4 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // end service OnlineTables diff --git a/cmd/workspace/permission-migration/permission-migration.go b/cmd/workspace/permission-migration/permission-migration.go new file mode 100755 index 000000000..a957d5ca3 --- /dev/null +++ b/cmd/workspace/permission-migration/permission-migration.go @@ -0,0 +1,136 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package permission_migration + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "permission-migration", + Short: `This spec contains undocumented permission migration APIs used in https://github.com/databrickslabs/ucx.`, + Long: `This spec contains undocumented permission migration APIs used in + https://github.com/databrickslabs/ucx.`, + GroupID: "iam", + Annotations: map[string]string{ + "package": "iam", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newMigratePermissions()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start migrate-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var migratePermissionsOverrides []func( + *cobra.Command, + *iam.PermissionMigrationRequest, +) + +func newMigratePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var migratePermissionsReq iam.PermissionMigrationRequest + var migratePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&migratePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().IntVar(&migratePermissionsReq.Size, "size", migratePermissionsReq.Size, `The maximum number of permissions that will be migrated.`) + + cmd.Use = "migrate-permissions WORKSPACE_ID FROM_WORKSPACE_GROUP_NAME TO_ACCOUNT_GROUP_NAME" + cmd.Short = `Migrate Permissions.` + cmd.Long = `Migrate Permissions. + + Migrate a batch of permissions from a workspace local group to an account + group. + + Arguments: + WORKSPACE_ID: WorkspaceId of the associated workspace where the permission migration + will occur. Both workspace group and account group must be in this + workspace. + FROM_WORKSPACE_GROUP_NAME: The name of the workspace group that permissions will be migrated from. + TO_ACCOUNT_GROUP_NAME: The name of the account group that permissions will be migrated to.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := cobra.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'workspace_id', 'from_workspace_group_name', 'to_account_group_name' in your JSON input") + } + return nil + } + check := cobra.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = migratePermissionsJson.Unmarshal(&migratePermissionsReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[0], &migratePermissionsReq.WorkspaceId) + if err != nil { + return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) + } + } + if !cmd.Flags().Changed("json") { + migratePermissionsReq.FromWorkspaceGroupName = args[1] + } + if !cmd.Flags().Changed("json") { + migratePermissionsReq.ToAccountGroupName = args[2] + } + + response, err := w.PermissionMigration.MigratePermissions(ctx, migratePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range migratePermissionsOverrides { + fn(cmd, &migratePermissionsReq) + } + + return cmd +} + +// end service PermissionMigration diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index 8aeb3fc73..5bf837e35 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -71,6 +71,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newSet()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -143,12 +149,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -209,12 +209,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start set command // Slice with functions to override default command behavior. @@ -289,12 +283,6 @@ func newSet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSet()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -369,10 +357,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Permissions diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 4c2db6aa3..78f42d6cd 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -41,6 +41,22 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetUpdate()) + cmd.AddCommand(newListPipelineEvents()) + cmd.AddCommand(newListPipelines()) + cmd.AddCommand(newListUpdates()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newStartUpdate()) + cmd.AddCommand(newStop()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -109,12 +125,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -182,12 +192,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -258,12 +262,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. 
@@ -334,12 +332,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -411,12 +403,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-update command // Slice with functions to override default command behavior. @@ -477,12 +463,6 @@ func newGetUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetUpdate()) - }) -} - // start list-pipeline-events command // Slice with functions to override default command behavior. @@ -552,12 +532,6 @@ func newListPipelineEvents() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListPipelineEvents()) - }) -} - // start list-pipelines command // Slice with functions to override default command behavior. @@ -613,12 +587,6 @@ func newListPipelines() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListPipelines()) - }) -} - // start list-updates command // Slice with functions to override default command behavior. @@ -693,12 +661,6 @@ func newListUpdates() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListUpdates()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -780,12 +742,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start start-update command // Slice with functions to override default command behavior. @@ -875,12 +831,6 @@ func newStartUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStartUpdate()) - }) -} - // start stop command // Slice with functions to override default command behavior. @@ -966,12 +916,6 @@ func newStop() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStop()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -1071,12 +1015,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. 
@@ -1158,10 +1096,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Pipelines diff --git a/cmd/workspace/policy-families/policy-families.go b/cmd/workspace/policy-families/policy-families.go index c81d2e92c..f6c07bf70 100755 --- a/cmd/workspace/policy-families/policy-families.go +++ b/cmd/workspace/policy-families/policy-families.go @@ -32,6 +32,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -95,12 +99,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -154,10 +152,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service PolicyFamilies diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 255296488..93f89c981 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -29,6 +29,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListShares()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -123,12 +131,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -200,12 +202,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -278,12 +274,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -339,12 +329,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start list-shares command // Slice with functions to override default command behavior. @@ -414,12 +398,6 @@ func newListShares() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListShares()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -506,10 +484,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Providers diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index ef2de4466..f2ab6f59c 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -30,6 +30,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRestore()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -105,12 +113,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -180,12 +182,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -254,12 +250,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -319,12 +309,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start restore command // Slice with functions to override default command behavior. @@ -393,12 +377,6 @@ func newRestore() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestore()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -483,10 +461,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Queries diff --git a/cmd/workspace/query-history/query-history.go b/cmd/workspace/query-history/query-history.go index 847461058..a0402e6d0 100755 --- a/cmd/workspace/query-history/query-history.go +++ b/cmd/workspace/query-history/query-history.go @@ -24,6 +24,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -89,10 +92,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service QueryHistory diff --git a/cmd/workspace/query-visualizations/query-visualizations.go b/cmd/workspace/query-visualizations/query-visualizations.go index 4f04c4261..4161ac7d5 100755 --- a/cmd/workspace/query-visualizations/query-visualizations.go +++ b/cmd/workspace/query-visualizations/query-visualizations.go @@ -32,6 +32,11 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -97,12 +102,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -159,12 +158,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -231,10 +224,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service QueryVisualizations diff --git a/cmd/workspace/recipient-activation/recipient-activation.go b/cmd/workspace/recipient-activation/recipient-activation.go index 5fb5c7b9e..068e6bb10 100755 --- a/cmd/workspace/recipient-activation/recipient-activation.go +++ b/cmd/workspace/recipient-activation/recipient-activation.go @@ -33,6 +33,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGetActivationUrlInfo()) + cmd.AddCommand(newRetrieveToken()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -99,12 +103,6 @@ func newGetActivationUrlInfo() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetActivationUrlInfo()) - }) -} - // start retrieve-token command // Slice with functions to override default command behavior. @@ -164,10 +162,4 @@ func newRetrieveToken() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRetrieveToken()) - }) -} - // end service RecipientActivation diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index d7d432b9c..797863137 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -43,6 +43,15 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRotateToken()) + cmd.AddCommand(newSharePermissions()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -142,12 +151,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -219,12 +222,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -297,12 +294,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -358,12 +349,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start rotate-token command // Slice with functions to override default command behavior. @@ -448,12 +433,6 @@ func newRotateToken() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRotateToken()) - }) -} - // start share-permissions command // Slice with functions to override default command behavior. @@ -525,12 +504,6 @@ func newSharePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSharePermissions()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -617,10 +590,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Recipients diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index 98aec3bb3..6cd01c137 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -55,6 +55,15 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteAlias()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetAlias()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -160,12 +169,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -242,12 +245,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start delete-alias command // Slice with functions to override default command behavior. @@ -313,12 +310,6 @@ func newDeleteAlias() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteAlias()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -394,12 +385,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -466,12 +451,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-alias command // Slice with functions to override default command behavior. @@ -559,12 +538,6 @@ func newSetAlias() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetAlias()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -655,10 +628,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service RegisteredModels diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 0c38183aa..6a989437a 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -36,6 +36,17 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -130,12 +141,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -209,12 +214,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -288,12 +287,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -364,12 +357,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -441,12 +428,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -501,12 +482,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -588,12 +563,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -680,12 +649,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. 
@@ -767,10 +730,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Repos diff --git a/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go new file mode 100755 index 000000000..d6b8a8424 --- /dev/null +++ b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go @@ -0,0 +1,227 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package restrict_workspace_admins + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "restrict-workspace-admins", + Short: `The Restrict Workspace Admins setting lets you control the capabilities of workspace admins.`, + Long: `The Restrict Workspace Admins setting lets you control the capabilities of + workspace admins. With the setting status set to ALLOW_ALL, workspace admins + can create service principal personal access tokens on behalf of any service + principal in their workspace. Workspace admins can also change a job owner to + any user in their workspace. And they can change the job run_as setting to any + user in their workspace or to a service principal on which they have the + Service Principal User role. With the setting status set to + RESTRICT_TOKENS_AND_JOB_RUN_AS, workspace admins can only create personal + access tokens on behalf of service principals they have the Service Principal + User role on. They can also only change a job owner to themselves. And they + can change the job run_as setting to themselves or to a service principal on + which they have the Service Principal User role.`, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteRestrictWorkspaceAdminRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteRestrictWorkspaceAdminRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the restrict workspace admins setting.` + cmd.Long = `Delete the restrict workspace admins setting. + + Reverts the restrict workspace admins setting status for the workspace. A + fresh etag needs to be provided in DELETE requests (as a query parameter). + The etag can be retrieved by making a GET request before the DELETE request. 
+ If the setting is updated/deleted concurrently, DELETE fails with 409 and + the request must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.RestrictWorkspaceAdmins().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetRestrictWorkspaceAdminRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetRestrictWorkspaceAdminRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the restrict workspace admins setting.` + cmd.Long = `Get the restrict workspace admins setting. + + Gets the restrict workspace admins setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := cobra.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.RestrictWorkspaceAdmins().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateRestrictWorkspaceAdminsSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateRestrictWorkspaceAdminsSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the restrict workspace admins setting.` + cmd.Long = `Update the restrict workspace admins setting. + + Updates the restrict workspace admins setting for the workspace. A fresh etag + needs to be provided in PATCH requests (as part of the setting field). The + etag can be retrieved by making a GET request before the PATCH request. 
If + the setting is updated concurrently, PATCH fails with 409 and the request + must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.RestrictWorkspaceAdmins().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service RestrictWorkspaceAdmins diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index ebdab2ab5..a5efeed37 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -31,6 +31,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -124,12 +131,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -201,12 +202,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -279,12 +274,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -349,12 +338,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -443,10 +426,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Schemas diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index ec6423d06..35b84907e 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -38,6 +38,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateScope()) + cmd.AddCommand(newDeleteAcl()) + cmd.AddCommand(newDeleteScope()) + cmd.AddCommand(newDeleteSecret()) + cmd.AddCommand(newGetAcl()) + cmd.AddCommand(newGetSecret()) + cmd.AddCommand(newListAcls()) + cmd.AddCommand(newListScopes()) + cmd.AddCommand(newListSecrets()) + cmd.AddCommand(newPutAcl()) + // Apply optional overrides to this command. 
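The new restrict-workspace-admins commands rely on the etag-based optimistic concurrency their help text describes: read the setting to obtain a fresh etag, pass it to DELETE or PATCH, and retry with the etag returned in a 409 response. A rough sketch of that flow against the SDK calls used by the generated commands; the request types and the w.Settings.RestrictWorkspaceAdmins() accessor come from this patch, while the Etag field on the Get response is an assumption:

package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

// deleteRestrictWorkspaceAdmins reverts the setting using a freshly read etag.
func deleteRestrictWorkspaceAdmins(ctx context.Context, w *databricks.WorkspaceClient) error {
	// A fresh etag must be read immediately before the DELETE request.
	current, err := w.Settings.RestrictWorkspaceAdmins().Get(ctx, settings.GetRestrictWorkspaceAdminRequest{})
	if err != nil {
		return err
	}

	_, err = w.Settings.RestrictWorkspaceAdmins().Delete(ctx, settings.DeleteRestrictWorkspaceAdminRequest{
		Etag: current.Etag, // assumed field name on the Get response
	})
	// If the setting changed concurrently, the 409 error carries a fresh etag
	// that a caller could feed into a retry of the Delete call.
	return err
}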
for _, fn := range cmdOverrides { fn(cmd) @@ -127,12 +139,6 @@ func newCreateScope() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateScope()) - }) -} - // start delete-acl command // Slice with functions to override default command behavior. @@ -217,12 +223,6 @@ func newDeleteAcl() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteAcl()) - }) -} - // start delete-scope command // Slice with functions to override default command behavior. @@ -302,12 +302,6 @@ func newDeleteScope() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteScope()) - }) -} - // start delete-secret command // Slice with functions to override default command behavior. @@ -392,12 +386,6 @@ func newDeleteSecret() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteSecret()) - }) -} - // start get-acl command // Slice with functions to override default command behavior. @@ -463,12 +451,6 @@ func newGetAcl() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetAcl()) - }) -} - // start get-secret command // Slice with functions to override default command behavior. @@ -540,12 +522,6 @@ func newGetSecret() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetSecret()) - }) -} - // start list-acls command // Slice with functions to override default command behavior. @@ -606,12 +582,6 @@ func newListAcls() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListAcls()) - }) -} - // start list-scopes command // Slice with functions to override default command behavior. @@ -654,12 +624,6 @@ func newListScopes() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListScopes()) - }) -} - // start list-secrets command // Slice with functions to override default command behavior. @@ -722,12 +686,6 @@ func newListSecrets() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListSecrets()) - }) -} - // start put-acl command // Slice with functions to override default command behavior. @@ -839,10 +797,4 @@ func newPutAcl() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPutAcl()) - }) -} - // end service Secrets diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index 353c08761..d363a1ba1 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -32,6 +32,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -112,12 +120,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -188,12 +190,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -265,12 +261,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -329,12 +319,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -417,12 +401,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -513,10 +491,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service ServicePrincipals diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 9424c5e4e..c4ca7d62d 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -40,6 +40,23 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newBuildLogs()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newExportMetrics()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newLogs()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newPut()) + cmd.AddCommand(newQuery()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdateConfig()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -111,12 +128,6 @@ func newBuildLogs() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newBuildLogs()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -195,12 +206,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -257,12 +262,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start export-metrics command // Slice with functions to override default command behavior. 
@@ -323,12 +322,6 @@ func newExportMetrics() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExportMetrics()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -387,12 +380,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -451,12 +438,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -516,12 +497,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -559,12 +534,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start logs command // Slice with functions to override default command behavior. @@ -628,12 +597,6 @@ func newLogs() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogs()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -705,12 +668,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start put command // Slice with functions to override default command behavior. @@ -781,12 +738,6 @@ func newPut() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPut()) - }) -} - // start query command // Slice with functions to override default command behavior. @@ -865,12 +816,6 @@ func newQuery() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newQuery()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -940,12 +885,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-config command // Slice with functions to override default command behavior. @@ -1038,12 +977,6 @@ func newUpdateConfig() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateConfig()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. 
@@ -1113,10 +1046,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service ServingEndpoints diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 35b65eb64..38e19e839 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -3,13 +3,13 @@ package settings import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/settings" "github.com/spf13/cobra" + + automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update" + csp_enablement "github.com/databricks/cli/cmd/workspace/csp-enablement" + default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace" + esm_enablement "github.com/databricks/cli/cmd/workspace/esm-enablement" + restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins" ) // Slice with functions to override default command behavior. @@ -18,26 +18,22 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "settings", - Short: `The default namespace setting API allows users to configure the default namespace for a Databricks workspace.`, - Long: `The default namespace setting API allows users to configure the default - namespace for a Databricks workspace. - - Through this API, users can retrieve, set, or modify the default namespace - used when queries do not reference a fully qualified three-level name. For - example, if you use the API to set 'retail_prod' as the default catalog, then - a query 'SELECT * FROM myTable' would reference the object - 'retail_prod.default.myTable' (the schema 'default' is always assumed). - - This setting requires a restart of clusters and SQL warehouses to take effect. - Additionally, the default namespace only applies when using Unity - Catalog-enabled compute.`, + Use: "settings", + Short: `Workspace Settings API allows users to manage settings at the workspace level.`, + Long: `Workspace Settings API allows users to manage settings at the workspace level.`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, } + // Add subservices + cmd.AddCommand(automatic_cluster_update.New()) + cmd.AddCommand(csp_enablement.New()) + cmd.AddCommand(default_namespace.New()) + cmd.AddCommand(esm_enablement.New()) + cmd.AddCommand(restrict_workspace_admins.New()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -46,396 +42,4 @@ func New() *cobra.Command { return cmd } -// start delete-default-namespace-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var deleteDefaultNamespaceSettingOverrides []func( - *cobra.Command, - *settings.DeleteDefaultNamespaceSettingRequest, -) - -func newDeleteDefaultNamespaceSetting() *cobra.Command { - cmd := &cobra.Command{} - - var deleteDefaultNamespaceSettingReq settings.DeleteDefaultNamespaceSettingRequest - - // TODO: short flags - - cmd.Flags().StringVar(&deleteDefaultNamespaceSettingReq.Etag, "etag", deleteDefaultNamespaceSettingReq.Etag, `etag used for versioning.`) - - cmd.Use = "delete-default-namespace-setting" - cmd.Short = `Delete the default namespace setting.` - cmd.Long = `Delete the default namespace setting. - - Deletes the default namespace setting for the workspace. A fresh etag needs to - be provided in DELETE requests (as a query parameter). The etag can be - retrieved by making a GET request before the DELETE request. If the setting is - updated/deleted concurrently, DELETE will fail with 409 and the request will - need to be retried by using the fresh etag in the 409 response.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - response, err := w.Settings.DeleteDefaultNamespaceSetting(ctx, deleteDefaultNamespaceSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteDefaultNamespaceSettingOverrides { - fn(cmd, &deleteDefaultNamespaceSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteDefaultNamespaceSetting()) - }) -} - -// start delete-restrict-workspace-admins-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deleteRestrictWorkspaceAdminsSettingOverrides []func( - *cobra.Command, - *settings.DeleteRestrictWorkspaceAdminsSettingRequest, -) - -func newDeleteRestrictWorkspaceAdminsSetting() *cobra.Command { - cmd := &cobra.Command{} - - var deleteRestrictWorkspaceAdminsSettingReq settings.DeleteRestrictWorkspaceAdminsSettingRequest - - // TODO: short flags - - cmd.Flags().StringVar(&deleteRestrictWorkspaceAdminsSettingReq.Etag, "etag", deleteRestrictWorkspaceAdminsSettingReq.Etag, `etag used for versioning.`) - - cmd.Use = "delete-restrict-workspace-admins-setting" - cmd.Short = `Delete the restrict workspace admins setting.` - cmd.Long = `Delete the restrict workspace admins setting. - - Reverts the restrict workspace admins setting status for the workspace. A - fresh etag needs to be provided in DELETE requests (as a query parameter). The - etag can be retrieved by making a GET request before the DELETE request. 
If - the setting is updated/deleted concurrently, DELETE will fail with 409 and the - request will need to be retried by using the fresh etag in the 409 response.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - response, err := w.Settings.DeleteRestrictWorkspaceAdminsSetting(ctx, deleteRestrictWorkspaceAdminsSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteRestrictWorkspaceAdminsSettingOverrides { - fn(cmd, &deleteRestrictWorkspaceAdminsSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteRestrictWorkspaceAdminsSetting()) - }) -} - -// start get-default-namespace-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getDefaultNamespaceSettingOverrides []func( - *cobra.Command, - *settings.GetDefaultNamespaceSettingRequest, -) - -func newGetDefaultNamespaceSetting() *cobra.Command { - cmd := &cobra.Command{} - - var getDefaultNamespaceSettingReq settings.GetDefaultNamespaceSettingRequest - - // TODO: short flags - - cmd.Flags().StringVar(&getDefaultNamespaceSettingReq.Etag, "etag", getDefaultNamespaceSettingReq.Etag, `etag used for versioning.`) - - cmd.Use = "get-default-namespace-setting" - cmd.Short = `Get the default namespace setting.` - cmd.Long = `Get the default namespace setting. - - Gets the default namespace setting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - response, err := w.Settings.GetDefaultNamespaceSetting(ctx, getDefaultNamespaceSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getDefaultNamespaceSettingOverrides { - fn(cmd, &getDefaultNamespaceSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetDefaultNamespaceSetting()) - }) -} - -// start get-restrict-workspace-admins-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var getRestrictWorkspaceAdminsSettingOverrides []func( - *cobra.Command, - *settings.GetRestrictWorkspaceAdminsSettingRequest, -) - -func newGetRestrictWorkspaceAdminsSetting() *cobra.Command { - cmd := &cobra.Command{} - - var getRestrictWorkspaceAdminsSettingReq settings.GetRestrictWorkspaceAdminsSettingRequest - - // TODO: short flags - - cmd.Flags().StringVar(&getRestrictWorkspaceAdminsSettingReq.Etag, "etag", getRestrictWorkspaceAdminsSettingReq.Etag, `etag used for versioning.`) - - cmd.Use = "get-restrict-workspace-admins-setting" - cmd.Short = `Get the restrict workspace admins setting.` - cmd.Long = `Get the restrict workspace admins setting. - - Gets the restrict workspace admins setting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - response, err := w.Settings.GetRestrictWorkspaceAdminsSetting(ctx, getRestrictWorkspaceAdminsSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getRestrictWorkspaceAdminsSettingOverrides { - fn(cmd, &getRestrictWorkspaceAdminsSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRestrictWorkspaceAdminsSetting()) - }) -} - -// start update-default-namespace-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var updateDefaultNamespaceSettingOverrides []func( - *cobra.Command, - *settings.UpdateDefaultNamespaceSettingRequest, -) - -func newUpdateDefaultNamespaceSetting() *cobra.Command { - cmd := &cobra.Command{} - - var updateDefaultNamespaceSettingReq settings.UpdateDefaultNamespaceSettingRequest - var updateDefaultNamespaceSettingJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateDefaultNamespaceSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Use = "update-default-namespace-setting" - cmd.Short = `Update the default namespace setting.` - cmd.Long = `Update the default namespace setting. - - Updates the default namespace setting for the workspace. A fresh etag needs to - be provided in PATCH requests (as part of the setting field). The etag can be - retrieved by making a GET request before the PATCH request. Note that if the - setting does not exist, GET will return a NOT_FOUND error and the etag will be - present in the error response, which should be set in the PATCH request. 
If - the setting is updated concurrently, PATCH will fail with 409 and the request - will need to be retried by using the fresh etag in the 409 response.` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - err = updateDefaultNamespaceSettingJson.Unmarshal(&updateDefaultNamespaceSettingReq) - if err != nil { - return err - } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") - } - - response, err := w.Settings.UpdateDefaultNamespaceSetting(ctx, updateDefaultNamespaceSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range updateDefaultNamespaceSettingOverrides { - fn(cmd, &updateDefaultNamespaceSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateDefaultNamespaceSetting()) - }) -} - -// start update-restrict-workspace-admins-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var updateRestrictWorkspaceAdminsSettingOverrides []func( - *cobra.Command, - *settings.UpdateRestrictWorkspaceAdminsSettingRequest, -) - -func newUpdateRestrictWorkspaceAdminsSetting() *cobra.Command { - cmd := &cobra.Command{} - - var updateRestrictWorkspaceAdminsSettingReq settings.UpdateRestrictWorkspaceAdminsSettingRequest - var updateRestrictWorkspaceAdminsSettingJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateRestrictWorkspaceAdminsSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Use = "update-restrict-workspace-admins-setting" - cmd.Short = `Update the restrict workspace admins setting.` - cmd.Long = `Update the restrict workspace admins setting. - - Updates the restrict workspace admins setting for the workspace. A fresh etag - needs to be provided in PATCH requests (as part of the setting field). The - etag can be retrieved by making a GET request before the PATCH request. If the - setting is updated concurrently, PATCH will fail with 409 and the request will - need to be retried by using the fresh etag in the 409 response.` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - err = updateRestrictWorkspaceAdminsSettingJson.Unmarshal(&updateRestrictWorkspaceAdminsSettingReq) - if err != nil { - return err - } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") - } - - response, err := w.Settings.UpdateRestrictWorkspaceAdminsSetting(ctx, updateRestrictWorkspaceAdminsSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. 
- cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range updateRestrictWorkspaceAdminsSettingOverrides { - fn(cmd, &updateRestrictWorkspaceAdminsSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateRestrictWorkspaceAdminsSetting()) - }) -} - // end service Settings diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index 2c0479a0a..b849f84f7 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -31,6 +31,15 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSharePermissions()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -118,12 +127,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -183,12 +186,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -250,12 +247,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -297,12 +288,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start share-permissions command // Slice with functions to override default command behavior. @@ -362,12 +347,6 @@ func newSharePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSharePermissions()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -452,12 +431,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -530,10 +503,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Shares diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 4a0d8f309..b763d1934 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -39,6 +39,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newValidate()) + // Apply optional overrides to this command. 
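Although command registration has moved out of init(), the per-service cmdOverrides slice and the per-command override slices (for example getOverrides in the restrict-workspace-admins file above) remain the hook for the manually curated files these generated headers mention. A hypothetical override.go for that package might look like the following; the override slices it appends to are declared in the generated file, but the customizations themselves are made up for illustration:

// override.go, a hypothetical manually curated file alongside the generated code.
package restrict_workspace_admins

import (
	"github.com/databricks/databricks-sdk-go/service/settings"
	"github.com/spf13/cobra"
)

func init() {
	// Service-level hook: runs against the command returned by New().
	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
		cmd.Hidden = true // example: hide the command group
	})

	// Command-level hook: runs against the generated `get` command and its request.
	getOverrides = append(getOverrides, func(cmd *cobra.Command, req *settings.GetRestrictWorkspaceAdminRequest) {
		cmd.Short = `Get the restrict workspace admins setting (customized).`
		_ = req // the request struct could be pre-populated here if needed
	})
}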
for _, fn := range cmdOverrides { fn(cmd) @@ -70,7 +78,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Comment associated with the credential.`) - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Supplying true to this argument skips validation of the created credential.`) @@ -131,12 +139,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -210,12 +212,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -288,12 +284,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -352,12 +342,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -381,7 +365,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) @@ -448,12 +432,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start validate command // Slice with functions to override default command behavior. 
@@ -476,10 +454,10 @@ func newValidate() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().StringVar(&validateReq.ExternalLocationName, "external-location-name", validateReq.ExternalLocationName, `The name of an existing external location to validate.`) cmd.Flags().BoolVar(&validateReq.ReadOnly, "read-only", validateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) - // TODO: any: storage_credential_name + cmd.Flags().StringVar(&validateReq.StorageCredentialName, "storage-credential-name", validateReq.StorageCredentialName, `The name of the storage credential to validate.`) cmd.Flags().StringVar(&validateReq.Url, "url", validateReq.Url, `The external location url to validate.`) cmd.Use = "validate" @@ -537,10 +515,4 @@ func newValidate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newValidate()) - }) -} - // end service StorageCredentials diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go index 9b2392a6e..d8135ac2a 100755 --- a/cmd/workspace/system-schemas/system-schemas.go +++ b/cmd/workspace/system-schemas/system-schemas.go @@ -28,6 +28,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDisable()) + cmd.AddCommand(newEnable()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -100,12 +105,6 @@ func newDisable() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDisable()) - }) -} - // start enable command // Slice with functions to override default command behavior. @@ -170,12 +169,6 @@ func newEnable() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEnable()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -232,10 +225,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service SystemSchemas diff --git a/cmd/workspace/table-constraints/table-constraints.go b/cmd/workspace/table-constraints/table-constraints.go index e17b95404..d5597ab33 100755 --- a/cmd/workspace/table-constraints/table-constraints.go +++ b/cmd/workspace/table-constraints/table-constraints.go @@ -39,6 +39,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -115,12 +119,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -196,10 +194,4 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // end service TableConstraints diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index d4e76587d..1ee6b0d52 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -35,6 +35,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newExists()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListSummaries()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -117,12 +125,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start exists command // Slice with functions to override default command behavior. @@ -200,12 +202,6 @@ func newExists() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExists()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -284,12 +280,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -358,12 +348,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start list-summaries command // Slice with functions to override default command behavior. @@ -446,12 +430,6 @@ func newListSummaries() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListSummaries()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -539,10 +517,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Tables diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index 1c2e2c37c..5209ff16d 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -29,6 +29,16 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateOboToken()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -130,12 +140,6 @@ func newCreateOboToken() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateOboToken()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -206,12 +210,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -282,12 +280,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -330,12 +322,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -379,12 +365,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -438,12 +418,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -509,12 +483,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -580,10 +548,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service TokenManagement diff --git a/cmd/workspace/tokens/tokens.go b/cmd/workspace/tokens/tokens.go index 5550acfa5..bdb99d601 100755 --- a/cmd/workspace/tokens/tokens.go +++ b/cmd/workspace/tokens/tokens.go @@ -28,6 +28,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -104,12 +109,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -203,12 +202,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -248,10 +241,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service Tokens diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 078a712e4..676b10a08 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -37,6 +37,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -120,12 +132,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -197,12 +203,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -281,12 +281,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -329,12 +323,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -378,12 +366,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -442,12 +424,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -530,12 +506,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -601,12 +571,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -698,12 +662,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. 
@@ -769,10 +727,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Users diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index d6863b660..a8d3d3ee8 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -28,6 +28,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateEndpoint()) + cmd.AddCommand(newDeleteEndpoint()) + cmd.AddCommand(newGetEndpoint()) + cmd.AddCommand(newListEndpoints()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -142,12 +148,6 @@ func newCreateEndpoint() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateEndpoint()) - }) -} - // start delete-endpoint command // Slice with functions to override default command behavior. @@ -164,18 +164,17 @@ func newDeleteEndpoint() *cobra.Command { // TODO: short flags - cmd.Use = "delete-endpoint ENDPOINT_NAME NAME" + cmd.Use = "delete-endpoint ENDPOINT_NAME" cmd.Short = `Delete an endpoint.` cmd.Long = `Delete an endpoint. Arguments: - ENDPOINT_NAME: Name of the endpoint - NAME: Name of the endpoint to delete` + ENDPOINT_NAME: Name of the endpoint` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := cobra.ExactArgs(1) return check(cmd, args) } @@ -185,7 +184,6 @@ func newDeleteEndpoint() *cobra.Command { w := root.WorkspaceClient(ctx) deleteEndpointReq.EndpointName = args[0] - deleteEndpointReq.Name = args[1] err = w.VectorSearchEndpoints.DeleteEndpoint(ctx, deleteEndpointReq) if err != nil { @@ -206,12 +204,6 @@ func newDeleteEndpoint() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteEndpoint()) - }) -} - // start get-endpoint command // Slice with functions to override default command behavior. @@ -268,12 +260,6 @@ func newGetEndpoint() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetEndpoint()) - }) -} - // start list-endpoints command // Slice with functions to override default command behavior. @@ -324,10 +310,4 @@ func newListEndpoints() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListEndpoints()) - }) -} - // end service VectorSearchEndpoints diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 6beca7d21..a9b9f51df 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -35,6 +35,16 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateIndex()) + cmd.AddCommand(newDeleteDataVectorIndex()) + cmd.AddCommand(newDeleteIndex()) + cmd.AddCommand(newGetIndex()) + cmd.AddCommand(newListIndexes()) + cmd.AddCommand(newQueryIndex()) + cmd.AddCommand(newSyncIndex()) + cmd.AddCommand(newUpsertDataVectorIndex()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -63,9 +73,8 @@ func newCreateIndex() *cobra.Command { // TODO: complex arg: delta_sync_index_spec // TODO: complex arg: direct_access_index_spec - cmd.Flags().StringVar(&createIndexReq.EndpointName, "endpoint-name", createIndexReq.EndpointName, `Name of the endpoint to be used for serving the index.`) - cmd.Use = "create-index NAME PRIMARY_KEY INDEX_TYPE" + cmd.Use = "create-index NAME ENDPOINT_NAME PRIMARY_KEY INDEX_TYPE" cmd.Short = `Create an index.` cmd.Long = `Create an index. @@ -73,6 +82,7 @@ func newCreateIndex() *cobra.Command { Arguments: NAME: Name of the index + ENDPOINT_NAME: Name of the endpoint to be used for serving the index PRIMARY_KEY: Primary key of the index INDEX_TYPE: There are 2 types of Vector Search indexes: @@ -88,11 +98,11 @@ func newCreateIndex() *cobra.Command { if cmd.Flags().Changed("json") { err := cobra.ExactArgs(0)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'primary_key', 'index_type' in your JSON input") + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'endpoint_name', 'primary_key', 'index_type' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := cobra.ExactArgs(4) return check(cmd, args) } @@ -111,12 +121,15 @@ func newCreateIndex() *cobra.Command { createIndexReq.Name = args[0] } if !cmd.Flags().Changed("json") { - createIndexReq.PrimaryKey = args[1] + createIndexReq.EndpointName = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &createIndexReq.IndexType) + createIndexReq.PrimaryKey = args[2] + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[3], &createIndexReq.IndexType) if err != nil { - return fmt.Errorf("invalid INDEX_TYPE: %s", args[2]) + return fmt.Errorf("invalid INDEX_TYPE: %s", args[3]) } } @@ -139,12 +152,6 @@ func newCreateIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateIndex()) - }) -} - // start delete-data-vector-index command // Slice with functions to override default command behavior. @@ -163,14 +170,14 @@ func newDeleteDataVectorIndex() *cobra.Command { // TODO: short flags cmd.Flags().Var(&deleteDataVectorIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "delete-data-vector-index NAME" + cmd.Use = "delete-data-vector-index INDEX_NAME" cmd.Short = `Delete data from index.` cmd.Long = `Delete data from index. Handles the deletion of data from a specified vector index. Arguments: - NAME: Name of the vector index where data is to be deleted. Must be a Direct + INDEX_NAME: Name of the vector index where data is to be deleted. 
Must be a Direct Vector Access Index.` cmd.Annotations = make(map[string]string) @@ -193,7 +200,7 @@ func newDeleteDataVectorIndex() *cobra.Command { } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } - deleteDataVectorIndexReq.Name = args[0] + deleteDataVectorIndexReq.IndexName = args[0] response, err := w.VectorSearchIndexes.DeleteDataVectorIndex(ctx, deleteDataVectorIndexReq) if err != nil { @@ -214,12 +221,6 @@ func newDeleteDataVectorIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteDataVectorIndex()) - }) -} - // start delete-index command // Slice with functions to override default command behavior. @@ -278,12 +279,6 @@ func newDeleteIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteIndex()) - }) -} - // start get-index command // Slice with functions to override default command behavior. @@ -342,12 +337,6 @@ func newGetIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetIndex()) - }) -} - // start list-indexes command // Slice with functions to override default command behavior. @@ -405,12 +394,6 @@ func newListIndexes() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListIndexes()) - }) -} - // start query-index command // Slice with functions to override default command behavior. @@ -484,12 +467,6 @@ func newQueryIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newQueryIndex()) - }) -} - // start sync-index command // Slice with functions to override default command behavior. @@ -548,12 +525,6 @@ func newSyncIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSyncIndex()) - }) -} - // start upsert-data-vector-index command // Slice with functions to override default command behavior. @@ -572,14 +543,14 @@ func newUpsertDataVectorIndex() *cobra.Command { // TODO: short flags cmd.Flags().Var(&upsertDataVectorIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "upsert-data-vector-index NAME INPUTS_JSON" + cmd.Use = "upsert-data-vector-index INDEX_NAME INPUTS_JSON" cmd.Short = `Upsert data into an index.` cmd.Long = `Upsert data into an index. Handles the upserting of data into a specified vector index. Arguments: - NAME: Name of the vector index where data is to be upserted. Must be a Direct + INDEX_NAME: Name of the vector index where data is to be upserted. Must be a Direct Vector Access Index. INPUTS_JSON: JSON string representing the data to be upserted.` @@ -589,7 +560,7 @@ func newUpsertDataVectorIndex() *cobra.Command { if cmd.Flags().Changed("json") { err := cobra.ExactArgs(1)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, provide only NAME as positional arguments. Provide 'inputs_json' in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only INDEX_NAME as positional arguments. 
Provide 'inputs_json' in your JSON input") } return nil } @@ -608,7 +579,7 @@ func newUpsertDataVectorIndex() *cobra.Command { return err } } - upsertDataVectorIndexReq.Name = args[0] + upsertDataVectorIndexReq.IndexName = args[0] if !cmd.Flags().Changed("json") { upsertDataVectorIndexReq.InputsJson = args[1] } @@ -632,10 +603,4 @@ func newUpsertDataVectorIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpsertDataVectorIndex()) - }) -} - // end service VectorSearchIndexes diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 12cafeaf8..5a2991b90 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -34,6 +34,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRead()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -152,12 +159,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -232,12 +233,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -308,12 +303,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start read command // Slice with functions to override default command behavior. @@ -389,12 +378,6 @@ func newRead() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRead()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -484,10 +467,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Volumes diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index 2e9282a85..3d1f05439 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -30,6 +30,21 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetWorkspaceWarehouseConfig()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newSetWorkspaceWarehouseConfig()) + cmd.AddCommand(newStart()) + cmd.AddCommand(newStop()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -138,12 +153,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -214,12 +223,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -336,12 +339,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -417,12 +414,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -493,12 +484,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -570,12 +555,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-workspace-warehouse-config command // Slice with functions to override default command behavior. @@ -619,12 +598,6 @@ func newGetWorkspaceWarehouseConfig() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetWorkspaceWarehouseConfig()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -677,12 +650,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -764,12 +731,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start set-workspace-warehouse-config command // Slice with functions to override default command behavior. @@ -843,12 +804,6 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetWorkspaceWarehouseConfig()) - }) -} - // start start command // Slice with functions to override default command behavior. @@ -943,12 +898,6 @@ func newStart() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStart()) - }) -} - // start stop command // Slice with functions to override default command behavior. @@ -1043,12 +992,6 @@ func newStop() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStop()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. 
@@ -1130,10 +1073,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Warehouses diff --git a/cmd/workspace/workspace-bindings/workspace-bindings.go b/cmd/workspace/workspace-bindings/workspace-bindings.go index f8d31fa45..3543f1e9d 100755 --- a/cmd/workspace/workspace-bindings/workspace-bindings.go +++ b/cmd/workspace/workspace-bindings/workspace-bindings.go @@ -42,6 +42,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetBindings()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdateBindings()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -109,12 +115,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-bindings command // Slice with functions to override default command behavior. @@ -176,12 +176,6 @@ func newGetBindings() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetBindings()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -252,12 +246,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-bindings command // Slice with functions to override default command behavior. @@ -330,10 +318,4 @@ func newUpdateBindings() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateBindings()) - }) -} - // end service WorkspaceBindings diff --git a/cmd/workspace/workspace-conf/workspace-conf.go b/cmd/workspace/workspace-conf/workspace-conf.go index 99207ffad..87ea86c8e 100755 --- a/cmd/workspace/workspace-conf/workspace-conf.go +++ b/cmd/workspace/workspace-conf/workspace-conf.go @@ -27,6 +27,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGetStatus()) + cmd.AddCommand(newSetStatus()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -90,12 +94,6 @@ func newGetStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetStatus()) - }) -} - // start set-status command // Slice with functions to override default command behavior. @@ -156,10 +154,4 @@ func newSetStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetStatus()) - }) -} - // end service WorkspaceConf diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 4fb63f0c0..42517c432 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -31,6 +31,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newExport()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetStatus()) + cmd.AddCommand(newImport()) + cmd.AddCommand(newList()) + cmd.AddCommand(newMkdirs()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -138,12 +150,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start export command // Slice with functions to override default command behavior. @@ -231,12 +237,6 @@ func newExport() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExport()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -297,12 +297,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -364,12 +358,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-status command // Slice with functions to override default command behavior. @@ -429,12 +417,6 @@ func newGetStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetStatus()) - }) -} - // start import command // Slice with functions to override default command behavior. @@ -528,12 +510,6 @@ func newImport() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newImport()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -593,12 +569,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start mkdirs command // Slice with functions to override default command behavior. @@ -696,12 +666,6 @@ func newMkdirs() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newMkdirs()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -773,12 +737,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. 
@@ -850,10 +808,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Workspace diff --git a/go.mod b/go.mod index 49521f64c..1f040cda0 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.33.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.34.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause @@ -20,7 +20,7 @@ require ( github.com/spf13/cobra v1.8.0 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT - golang.org/x/exp v0.0.0-20231006140011-7918f672742d + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.15.0 golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 @@ -32,7 +32,7 @@ require ( require gopkg.in/yaml.v3 v3.0.1 require ( - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/compute v1.23.4 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -54,18 +54,18 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.22.0 // indirect - go.opentelemetry.io/otel/trace v1.22.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect + go.opentelemetry.io/otel v1.23.0 // indirect + go.opentelemetry.io/otel/metric v1.23.0 // indirect + go.opentelemetry.io/otel/trace v1.23.0 // indirect golang.org/x/crypto v0.19.0 // indirect golang.org/x/net v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.161.0 // indirect + google.golang.org/api v0.166.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/grpc v1.60.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/grpc v1.61.1 // indirect google.golang.org/protobuf v1.32.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 4b273bc3c..de5f07cf7 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw= +cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX 
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.33.0 h1:0ldeP8aPnpKLV/mvNKsOVijOaLLo6TxRGdIwrEf2rlQ= -github.com/databricks/databricks-sdk-go v0.33.0/go.mod h1:yyXGdhEfXBBsIoTm0mdl8QN0xzCQPUVZTozMM/7wVuI= +github.com/databricks/databricks-sdk-go v0.34.0 h1:z4JjgcCk99jAGxx3JgkMsniJFtReWhtAxkgyvtdFqCs= +github.com/databricks/databricks-sdk-go v0.34.0/go.mod h1:MGNWVPqxYCW1vj/xD7DeLT8uChi4lgTFum+iIwDxd/Q= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -94,8 +94,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= +github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= @@ -160,24 +160,24 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= +go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= +go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= +go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= +go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= +go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -239,12 +239,12 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU= -google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= +google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= +google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= 
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -252,15 +252,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From a467d01f6f1df2e94a2e3583091c80bc8d4393a4 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 6 Mar 2024 12:30:23 +0100 Subject: [PATCH 067/286] Release v0.215.0 (#1257) CLI: * The SDK update fixes `fs cp` calls timing out when copying large files. Bundles: * Fix summary command when internal Terraform config doesn't exist ([#1242](https://github.com/databricks/cli/pull/1242)). * Configure cobra.NoArgs for bundle commands where applicable ([#1250](https://github.com/databricks/cli/pull/1250)). * Fixed building Python artifacts on Windows with WSL ([#1249](https://github.com/databricks/cli/pull/1249)). * Add `--validate-only` flag to run validate-only pipeline update ([#1251](https://github.com/databricks/cli/pull/1251)). * Only transform wheel libraries when using trampoline ([#1248](https://github.com/databricks/cli/pull/1248)). * Return `application_id` for service principal lookups ([#1245](https://github.com/databricks/cli/pull/1245)). * Support relative paths in artifact files source section and always upload all artifact files ([#1247](https://github.com/databricks/cli/pull/1247)). 
* Fix DBConnect support in VS Code ([#1253](https://github.com/databricks/cli/pull/1253)).

Internal:
* Added test to verify scripts.Execute mutator works correctly ([#1237](https://github.com/databricks/cli/pull/1237)).

API Changes:
* Added `databricks permission-migration` command group.
* Updated nesting of the `databricks settings` and `databricks account settings` commands.
* Changed `databricks vector-search-endpoints delete-endpoint` command with new required argument order.
* Changed `databricks vector-search-indexes create-index` command with new required argument order.
* Changed `databricks vector-search-indexes delete-data-vector-index` command with new required argument order.
* Changed `databricks vector-search-indexes upsert-data-vector-index` command with new required argument order.

OpenAPI commit d855b30f25a06fe84f25214efa20e7f1fffcdf9e (2024-03-04)

Dependency updates:
* Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 ([#1252](https://github.com/databricks/cli/pull/1252)).
* Update Go SDK to v0.34.0 ([#1256](https://github.com/databricks/cli/pull/1256)).
---
 CHANGELOG.md | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 72a6608da..527030992 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,36 @@
 # Version changelog
+## 0.215.0
+
+CLI:
+* The SDK update fixes `fs cp` calls timing out when copying large files.
+
+Bundles:
+* Fix summary command when internal Terraform config doesn't exist ([#1242](https://github.com/databricks/cli/pull/1242)).
+* Configure cobra.NoArgs for bundle commands where applicable ([#1250](https://github.com/databricks/cli/pull/1250)).
+* Fixed building Python artifacts on Windows with WSL ([#1249](https://github.com/databricks/cli/pull/1249)).
+* Add `--validate-only` flag to run validate-only pipeline update ([#1251](https://github.com/databricks/cli/pull/1251)).
+* Only transform wheel libraries when using trampoline ([#1248](https://github.com/databricks/cli/pull/1248)).
+* Return `application_id` for service principal lookups ([#1245](https://github.com/databricks/cli/pull/1245)).
+* Support relative paths in artifact files source section and always upload all artifact files ([#1247](https://github.com/databricks/cli/pull/1247)).
+* Fix DBConnect support in VS Code ([#1253](https://github.com/databricks/cli/pull/1253)).
+
+Internal:
+* Added test to verify scripts.Execute mutator works correctly ([#1237](https://github.com/databricks/cli/pull/1237)).
+
+API Changes:
+* Added `databricks permission-migration` command group.
+* Updated nesting of the `databricks settings` and `databricks account settings` commands.
+* Changed `databricks vector-search-endpoints delete-endpoint` command with new required argument order.
+* Changed `databricks vector-search-indexes create-index` command with new required argument order.
+* Changed `databricks vector-search-indexes delete-data-vector-index` command with new required argument order.
+* Changed `databricks vector-search-indexes upsert-data-vector-index` command with new required argument order.
+
+OpenAPI commit d855b30f25a06fe84f25214efa20e7f1fffcdf9e (2024-03-04)
+
+Dependency updates:
+* Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 ([#1252](https://github.com/databricks/cli/pull/1252)).
+* Update Go SDK to v0.34.0 ([#1256](https://github.com/databricks/cli/pull/1256)).
## 0.214.1 CLI: From c05c0cd9416b53e8c2554495d98c33acc7689347 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 7 Mar 2024 14:56:50 +0100 Subject: [PATCH 068/286] Include `dyn.Path` as argument to the visit callback function (#1260) ## Changes This change means the callback supplied to `dyn.Foreach` can introspect the path of the value it is being called for. It also prepares for allowing visiting path patterns where the exact path is not known upfront. ## Tests Unit tests. --- bundle/config/mutator/merge_job_clusters.go | 2 +- bundle/config/mutator/merge_job_tasks.go | 2 +- .../config/mutator/merge_pipeline_clusters.go | 2 +- bundle/config/mutator/rewrite_sync_paths.go | 4 +- bundle/config/root.go | 4 +- bundle/deploy/terraform/tfdyn/convert_job.go | 4 +- libs/dyn/merge/elements_by_key.go | 2 +- libs/dyn/visit.go | 4 +- libs/dyn/visit_get.go | 2 +- libs/dyn/visit_map.go | 8 +-- libs/dyn/visit_map_test.go | 50 +++++++++++++------ libs/dyn/visit_set.go | 2 +- 12 files changed, 52 insertions(+), 34 deletions(-) diff --git a/bundle/config/mutator/merge_job_clusters.go b/bundle/config/mutator/merge_job_clusters.go index e8378f480..9c99cfaad 100644 --- a/bundle/config/mutator/merge_job_clusters.go +++ b/bundle/config/mutator/merge_job_clusters.go @@ -35,7 +35,7 @@ func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) error { return v, nil } - return dyn.Map(v, "resources.jobs", dyn.Foreach(func(job dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "resources.jobs", dyn.Foreach(func(_ dyn.Path, job dyn.Value) (dyn.Value, error) { return dyn.Map(job, "job_clusters", merge.ElementsByKey("job_cluster_key", m.jobClusterKey)) })) }) diff --git a/bundle/config/mutator/merge_job_tasks.go b/bundle/config/mutator/merge_job_tasks.go index 7394368ab..91aee3a03 100644 --- a/bundle/config/mutator/merge_job_tasks.go +++ b/bundle/config/mutator/merge_job_tasks.go @@ -35,7 +35,7 @@ func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) error { return v, nil } - return dyn.Map(v, "resources.jobs", dyn.Foreach(func(job dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "resources.jobs", dyn.Foreach(func(_ dyn.Path, job dyn.Value) (dyn.Value, error) { return dyn.Map(job, "tasks", merge.ElementsByKey("task_key", m.taskKeyString)) })) }) diff --git a/bundle/config/mutator/merge_pipeline_clusters.go b/bundle/config/mutator/merge_pipeline_clusters.go index 777ce611b..552d997b9 100644 --- a/bundle/config/mutator/merge_pipeline_clusters.go +++ b/bundle/config/mutator/merge_pipeline_clusters.go @@ -38,7 +38,7 @@ func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) err return v, nil } - return dyn.Map(v, "resources.pipelines", dyn.Foreach(func(pipeline dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "resources.pipelines", dyn.Foreach(func(_ dyn.Path, pipeline dyn.Value) (dyn.Value, error) { return dyn.Map(pipeline, "clusters", merge.ElementsByKey("label", m.clusterLabel)) })) }) diff --git a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go index c1761690d..5e17b1b5f 100644 --- a/bundle/config/mutator/rewrite_sync_paths.go +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -30,7 +30,7 @@ func (m *rewriteSyncPaths) Name() string { // // Then the resulting value will be "bar/somefile.*". 
func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { - return func(v dyn.Value) (dyn.Value, error) { + return func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { dir := filepath.Dir(v.Location().File) rel, err := filepath.Rel(root, dir) if err != nil { @@ -43,7 +43,7 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) error { return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - return dyn.Map(v, "sync", func(v dyn.Value) (nv dyn.Value, err error) { + return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) { v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.Config.Path))) if err != nil { return dyn.NilValue, err diff --git a/bundle/config/root.go b/bundle/config/root.go index c8b6c5999..8e1ff6507 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -409,14 +409,14 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { } // For each target, rewrite the variables block. - return dyn.Map(v, "targets", dyn.Foreach(func(target dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "targets", dyn.Foreach(func(_ dyn.Path, target dyn.Value) (dyn.Value, error) { // Confirm it has a variables block. if target.Get("variables") == dyn.NilValue { return target, nil } // For each variable, normalize its contents if it is a single string. - return dyn.Map(target, "variables", dyn.Foreach(func(variable dyn.Value) (dyn.Value, error) { + return dyn.Map(target, "variables", dyn.Foreach(func(_ dyn.Path, variable dyn.Value) (dyn.Value, error) { if variable.Kind() != dyn.KindString { return variable, nil } diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go index b488df157..778af1adc 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job.go +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -30,7 +30,7 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { } // Modify keys in the "git_source" block - vout, err = dyn.Map(vout, "git_source", func(v dyn.Value) (dyn.Value, error) { + vout, err = dyn.Map(vout, "git_source", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return renameKeys(v, map[string]string{ "git_branch": "branch", "git_commit": "commit", @@ -44,7 +44,7 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { } // Modify keys in the "task" blocks - vout, err = dyn.Map(vout, "task", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + vout, err = dyn.Map(vout, "task", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return renameKeys(v, map[string]string{ "libraries": "library", }) diff --git a/libs/dyn/merge/elements_by_key.go b/libs/dyn/merge/elements_by_key.go index 3ce571bf7..da20ee849 100644 --- a/libs/dyn/merge/elements_by_key.go +++ b/libs/dyn/merge/elements_by_key.go @@ -7,7 +7,7 @@ type elementsByKey struct { keyFunc func(dyn.Value) string } -func (e elementsByKey) Map(v dyn.Value) (dyn.Value, error) { +func (e elementsByKey) Map(_ dyn.Path, v dyn.Value) (dyn.Value, error) { // We know the type of this value is a sequence. // For additional defence, return self if it is not. 
elements, ok := v.AsSequence() diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index 077fd51c5..d1a8d73b6 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -44,7 +44,7 @@ type visitOptions struct { // // If this function returns an error, the original visit function call // returns this error and the value is left unmodified. - fn func(Value) (Value, error) + fn func(Path, Value) (Value, error) // If set, tolerate the absence of the last component in the path. // This option is needed to set a key in a map that is not yet present. @@ -53,7 +53,7 @@ type visitOptions struct { func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { if len(suffix) == 0 { - return opts.fn(v) + return opts.fn(prefix, v) } // Initialize prefix if it is empty. diff --git a/libs/dyn/visit_get.go b/libs/dyn/visit_get.go index a0f848cdd..8b083fc6b 100644 --- a/libs/dyn/visit_get.go +++ b/libs/dyn/visit_get.go @@ -15,7 +15,7 @@ func Get(v Value, path string) (Value, error) { func GetByPath(v Value, p Path) (Value, error) { out := InvalidValue _, err := visit(v, EmptyPath, p, visitOptions{ - fn: func(ev Value) (Value, error) { + fn: func(_ Path, ev Value) (Value, error) { // Capture the value argument to return it. out = ev return ev, nil diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index ed89baa4a..e6053d9d1 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -7,18 +7,18 @@ import ( ) // MapFunc is a function that maps a value to another value. -type MapFunc func(Value) (Value, error) +type MapFunc func(Path, Value) (Value, error) // Foreach returns a [MapFunc] that applies the specified [MapFunc] to each // value in a map or sequence and returns the new map or sequence. func Foreach(fn MapFunc) MapFunc { - return func(v Value) (Value, error) { + return func(p Path, v Value) (Value, error) { switch v.Kind() { case KindMap: m := maps.Clone(v.MustMap()) for key, value := range m { var err error - m[key], err = fn(value) + m[key], err = fn(p.Append(Key(key)), value) if err != nil { return InvalidValue, err } @@ -28,7 +28,7 @@ func Foreach(fn MapFunc) MapFunc { s := slices.Clone(v.MustSequence()) for i, value := range s { var err error - s[i], err = fn(value) + s[i], err = fn(p.Append(Index(i)), value) if err != nil { return InvalidValue, err } diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index 117d03f0a..2659b71f7 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -12,7 +12,7 @@ import ( func TestMapWithEmptyPath(t *testing.T) { // An empty path means to return the value itself. vin := dyn.V(42) - vout, err := dyn.MapByPath(dyn.InvalidValue, dyn.EmptyPath, func(v dyn.Value) (dyn.Value, error) { + vout, err := dyn.MapByPath(dyn.InvalidValue, dyn.EmptyPath, func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return vin, nil }) assert.NoError(t, err) @@ -45,7 +45,7 @@ func TestMapFuncOnMap(t *testing.T) { // Note: in the test cases below we implicitly test that the original // value is not modified as we repeatedly set values on it. 
- vfoo, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(v dyn.Value) (dyn.Value, error) { + vfoo, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { assert.Equal(t, dyn.V(42), v) return dyn.V(44), nil }) @@ -55,7 +55,7 @@ func TestMapFuncOnMap(t *testing.T) { "bar": 43, }, vfoo.AsAny()) - vbar, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("bar")), func(v dyn.Value) (dyn.Value, error) { + vbar, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("bar")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { assert.Equal(t, dyn.V(43), v) return dyn.V(45), nil }) @@ -67,7 +67,7 @@ func TestMapFuncOnMap(t *testing.T) { // Return error from map function. var ref = fmt.Errorf("error") - verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(v dyn.Value) (dyn.Value, error) { + verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) assert.Equal(t, dyn.InvalidValue, verr) @@ -88,7 +88,7 @@ func TestMapFuncOnMapWithEmptySequence(t *testing.T) { }) for j := 0; j < len(variants); j++ { - vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("key")), func(v dyn.Value) (dyn.Value, error) { + vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("key")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return variants[j], nil }) assert.NoError(t, err) @@ -115,14 +115,14 @@ func TestMapFuncOnSequence(t *testing.T) { // Note: in the test cases below we implicitly test that the original // value is not modified as we repeatedly set values on it. - v0, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(v dyn.Value) (dyn.Value, error) { + v0, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { assert.Equal(t, dyn.V(42), v) return dyn.V(44), nil }) assert.NoError(t, err) assert.Equal(t, []any{44, 43}, v0.AsAny()) - v1, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(1)), func(v dyn.Value) (dyn.Value, error) { + v1, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(1)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { assert.Equal(t, dyn.V(43), v) return dyn.V(45), nil }) @@ -131,7 +131,7 @@ func TestMapFuncOnSequence(t *testing.T) { // Return error from map function. var ref = fmt.Errorf("error") - verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(v dyn.Value) (dyn.Value, error) { + verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) assert.Equal(t, dyn.InvalidValue, verr) @@ -152,7 +152,7 @@ func TestMapFuncOnSequenceWithEmptySequence(t *testing.T) { }) for j := 0; j < len(variants); j++ { - vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(v dyn.Value) (dyn.Value, error) { + vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return variants[j], nil }) assert.NoError(t, err) @@ -170,10 +170,19 @@ func TestMapForeachOnMap(t *testing.T) { var err error // Run foreach, adding 1 to each of the elements. 
- vout, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + vout, err := dyn.Map(vin, ".", dyn.Foreach(func(p dyn.Path, v dyn.Value) (dyn.Value, error) { i, ok := v.AsInt() require.True(t, ok, "expected an integer") - return dyn.V(int(i) + 1), nil + switch p[0].Key() { + case "foo": + assert.EqualValues(t, 42, i) + return dyn.V(43), nil + case "bar": + assert.EqualValues(t, 43, i) + return dyn.V(44), nil + default: + return dyn.InvalidValue, fmt.Errorf("unexpected key %q", p[0].Key()) + } })) assert.NoError(t, err) assert.Equal(t, map[string]any{ @@ -196,7 +205,7 @@ func TestMapForeachOnMapError(t *testing.T) { // Check that an error from the map function propagates. var ref = fmt.Errorf("error") - _, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) assert.ErrorIs(t, err, ref) @@ -211,10 +220,19 @@ func TestMapForeachOnSequence(t *testing.T) { var err error // Run foreach, adding 1 to each of the elements. - vout, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + vout, err := dyn.Map(vin, ".", dyn.Foreach(func(p dyn.Path, v dyn.Value) (dyn.Value, error) { i, ok := v.AsInt() require.True(t, ok, "expected an integer") - return dyn.V(int(i) + 1), nil + switch p[0].Index() { + case 0: + assert.EqualValues(t, 42, i) + return dyn.V(43), nil + case 1: + assert.EqualValues(t, 43, i) + return dyn.V(44), nil + default: + return dyn.InvalidValue, fmt.Errorf("unexpected index %d", p[0].Index()) + } })) assert.NoError(t, err) assert.Equal(t, []any{43, 44}, vout.AsAny()) @@ -231,7 +249,7 @@ func TestMapForeachOnSequenceError(t *testing.T) { // Check that an error from the map function propagates. var ref = fmt.Errorf("error") - _, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) assert.ErrorIs(t, err, ref) @@ -241,7 +259,7 @@ func TestMapForeachOnOtherError(t *testing.T) { vin := dyn.V(42) // Check that if foreach is applied to something other than a map or a sequence, it returns an error. - _, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, nil })) assert.ErrorContains(t, err, "expected a map or sequence, found int") diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index fdbf41c2c..3ad770f08 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -15,7 +15,7 @@ func Set(v Value, path string, nv Value) (Value, error) { // If the path doesn't exist, it returns InvalidValue and an error. func SetByPath(v Value, p Path, nv Value) (Value, error) { return visit(v, EmptyPath, p, visitOptions{ - fn: func(_ Value) (Value, error) { + fn: func(_ Path, _ Value) (Value, error) { // Return the incoming value to set it. return nv, nil }, From 16a4c711e2b20ba16b2088950e3bcf63fab93798 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 7 Mar 2024 15:13:04 +0100 Subject: [PATCH 069/286] Inline logic to set a value in `dyn.SetByPath` (#1261) ## Changes This removes the need for the `allowMissingKeyInMap` option to the private `visit` function and ensures that the body of the visit function doesn't add or remove values of the configuration it traverses. 
This in turn prepares for visiting a path pattern that yields more than one callback, which doesn't match well with the now-removed option. ## Tests Unit tests pass and fully cover the inlined code. --- libs/dyn/visit.go | 6 +--- libs/dyn/visit_set.go | 64 +++++++++++++++++++++++++++++++++++++++---- 2 files changed, 60 insertions(+), 10 deletions(-) diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index d1a8d73b6..ef055e400 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -45,10 +45,6 @@ type visitOptions struct { // If this function returns an error, the original visit function call // returns this error and the value is left unmodified. fn func(Path, Value) (Value, error) - - // If set, tolerate the absence of the last component in the path. - // This option is needed to set a key in a map that is not yet present. - allowMissingKeyInMap bool } func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { @@ -76,7 +72,7 @@ func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { // Lookup current value in the map. ev, ok := m[component.key] - if !ok && !opts.allowMissingKeyInMap { + if !ok { return InvalidValue, noSuchKeyError{prefix} } diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index 3ad770f08..d0361981a 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -1,5 +1,11 @@ package dyn +import ( + "fmt" + "maps" + "slices" +) + // Set assigns a new value at the specified path in the specified value. // It is identical to [SetByPath], except that it takes a string path instead of a [Path]. func Set(v Value, path string, nv Value) (Value, error) { @@ -14,11 +20,59 @@ func Set(v Value, path string, nv Value) (Value, error) { // If successful, it returns the new value with all intermediate values copied and updated. // If the path doesn't exist, it returns InvalidValue and an error. func SetByPath(v Value, p Path, nv Value) (Value, error) { - return visit(v, EmptyPath, p, visitOptions{ - fn: func(_ Path, _ Value) (Value, error) { - // Return the incoming value to set it. - return nv, nil + lp := len(p) + if lp == 0 { + return nv, nil + } + + parent := p[:lp-1] + component := p[lp-1] + + return visit(v, EmptyPath, parent, visitOptions{ + fn: func(prefix Path, v Value) (Value, error) { + path := prefix.Append(component) + + switch { + case component.isKey(): + // Expect a map to be set if this is a key. + m, ok := v.AsMap() + if !ok { + return InvalidValue, fmt.Errorf("expected a map to index %q, found %s", path, v.Kind()) + } + + // Return an updated map value. + m = maps.Clone(m) + m[component.key] = nv + return Value{ + v: m, + k: KindMap, + l: v.l, + }, nil + + case component.isIndex(): + // Expect a sequence to be set if this is an index. + s, ok := v.AsSequence() + if !ok { + return InvalidValue, fmt.Errorf("expected a sequence to index %q, found %s", path, v.Kind()) + } + + // Lookup current value in the sequence. + if component.index < 0 || component.index >= len(s) { + return InvalidValue, indexOutOfBoundsError{prefix} + } + + // Return an updated sequence value. 
+ s = slices.Clone(s) + s[component.index] = nv + return Value{ + v: s, + k: KindSequence, + l: v.l, + }, nil + + default: + panic("invalid component") + } }, - allowMissingKeyInMap: true, }) } From c950826ac1550d62c08da63a15ac8397111e44ac Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 8 Mar 2024 11:48:40 +0100 Subject: [PATCH 070/286] Add assertions for the `dyn.Path` argument to the visit callback (#1265) ## Changes The `dyn.Path` argument wasn't tested and could regress. Spotted this while working on related code. Follow up to #1260. ## Tests Unit tests. --- libs/dyn/visit_map_test.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index 2659b71f7..2be996fba 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -45,7 +45,8 @@ func TestMapFuncOnMap(t *testing.T) { // Note: in the test cases below we implicitly test that the original // value is not modified as we repeatedly set values on it. - vfoo, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + vfoo, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Key("foo")), p) assert.Equal(t, dyn.V(42), v) return dyn.V(44), nil }) @@ -55,7 +56,8 @@ func TestMapFuncOnMap(t *testing.T) { "bar": 43, }, vfoo.AsAny()) - vbar, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("bar")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + vbar, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("bar")), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Key("bar")), p) assert.Equal(t, dyn.V(43), v) return dyn.V(45), nil }) @@ -115,14 +117,16 @@ func TestMapFuncOnSequence(t *testing.T) { // Note: in the test cases below we implicitly test that the original // value is not modified as we repeatedly set values on it. - v0, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + v0, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Index(0)), p) assert.Equal(t, dyn.V(42), v) return dyn.V(44), nil }) assert.NoError(t, err) assert.Equal(t, []any{44, 43}, v0.AsAny()) - v1, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(1)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + v1, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(1)), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Index(1)), p) assert.Equal(t, dyn.V(43), v) return dyn.V(45), nil }) From 2453cd49d9d1d30c66b611464762727ea4b33b10 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 8 Mar 2024 15:33:01 +0100 Subject: [PATCH 071/286] Add `dyn.MapByPattern` to map a function to values with matching paths (#1266) ## Changes The new `dyn.Pattern` type represents a path pattern that can match one or more paths in a configuration tree. Every `dyn.Path` can be converted to a `dyn.Pattern` that matches only a single path. To accommodate this change, the visit function needed to be modified to take a `dyn.Pattern` suffix. Every component in the pattern implements an interface to work with the visit function. This function can recurse on the visit function for one or more elements of the value being visited. For patterns derived from a `dyn.Path`, it will work as it did before and select the matching element. 
For the new pattern components (e.g. `dyn.AnyKey` or `dyn.AnyIndex`), it recurses on all the elements in the container. ## Tests Unit tests. Confirmed full coverage for the new code. --- libs/dyn/pattern.go | 96 ++++++++++++++++++++++++++++++++++ libs/dyn/pattern_test.go | 28 ++++++++++ libs/dyn/visit.go | 22 +++++--- libs/dyn/visit_get.go | 2 +- libs/dyn/visit_map.go | 20 ++++--- libs/dyn/visit_map_test.go | 104 +++++++++++++++++++++++++++++++++++++ libs/dyn/visit_set.go | 4 +- 7 files changed, 258 insertions(+), 18 deletions(-) create mode 100644 libs/dyn/pattern.go create mode 100644 libs/dyn/pattern_test.go diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go new file mode 100644 index 000000000..7e8b5d6e9 --- /dev/null +++ b/libs/dyn/pattern.go @@ -0,0 +1,96 @@ +package dyn + +import ( + "fmt" + "maps" + "slices" +) + +// Pattern represents a matcher for paths in a [Value] configuration tree. +// It is used by [MapByPattern] to apply a function to the values whose paths match the pattern. +// Every [Path] is a valid [Pattern] that matches a single unique path. +// The reverse is not true; not every [Pattern] is a valid [Path], as patterns may contain wildcards. +type Pattern []patternComponent + +// A pattern component can visit a [Value] and recursively call into [visit] for matching elements. +// Fixed components can match a single key or index, while wildcards can match any key or index. +type patternComponent interface { + visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) +} + +// NewPattern returns a new pattern from the given components. +// The individual components may be created with [Key], [Index], or [Any]. +func NewPattern(cs ...patternComponent) Pattern { + return cs +} + +// NewPatternFromPath returns a new pattern from the given path. +func NewPatternFromPath(p Path) Pattern { + cs := make(Pattern, len(p)) + for i, c := range p { + cs[i] = c + } + return cs +} + +type anyKeyComponent struct{} + +// AnyKey returns a pattern component that matches any key. +func AnyKey() patternComponent { + return anyKeyComponent{} +} + +// This function implements the patternComponent interface. +func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { + m, ok := v.AsMap() + if !ok { + return InvalidValue, fmt.Errorf("expected a map at %q, found %s", prefix, v.Kind()) + } + + m = maps.Clone(m) + for key, value := range m { + var err error + nv, err := visit(value, prefix.Append(Key(key)), suffix, opts) + if err != nil { + // Leave the value intact if the suffix pattern didn't match any value. + if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { + continue + } + return InvalidValue, err + } + m[key] = nv + } + + return NewValue(m, v.Location()), nil +} + +type anyIndexComponent struct{} + +// AnyIndex returns a pattern component that matches any index. +func AnyIndex() patternComponent { + return anyIndexComponent{} +} + +// This function implements the patternComponent interface. +func (c anyIndexComponent) visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { + s, ok := v.AsSequence() + if !ok { + return InvalidValue, fmt.Errorf("expected a sequence at %q, found %s", prefix, v.Kind()) + } + + s = slices.Clone(s) + for i, value := range s { + var err error + nv, err := visit(value, prefix.Append(Index(i)), suffix, opts) + if err != nil { + // Leave the value intact if the suffix pattern didn't match any value. 
+ if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { + continue + } + return InvalidValue, err + } + s[i] = nv + } + + return NewValue(s, v.Location()), nil +} diff --git a/libs/dyn/pattern_test.go b/libs/dyn/pattern_test.go new file mode 100644 index 000000000..b91af8293 --- /dev/null +++ b/libs/dyn/pattern_test.go @@ -0,0 +1,28 @@ +package dyn_test + +import ( + "testing" + + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/assert" +) + +func TestNewPattern(t *testing.T) { + pat := dyn.NewPattern( + dyn.Key("foo"), + dyn.Index(1), + ) + + assert.Len(t, pat, 2) +} + +func TestNewPatternFromPath(t *testing.T) { + path := dyn.NewPath( + dyn.Key("foo"), + dyn.Index(1), + ) + + pat1 := dyn.NewPattern(dyn.Key("foo"), dyn.Index(1)) + pat2 := dyn.NewPatternFromPath(path) + assert.Equal(t, pat1, pat2) +} diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index ef055e400..ffd8323d4 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -47,7 +47,7 @@ type visitOptions struct { fn func(Path, Value) (Value, error) } -func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { +func visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { if len(suffix) == 0 { return opts.fn(prefix, v) } @@ -59,25 +59,31 @@ func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { } component := suffix[0] - prefix = prefix.Append(component) suffix = suffix[1:] + // Visit the value with the current component. + return component.visit(v, prefix, suffix, opts) +} + +func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { + path := prefix.Append(component) + switch { case component.isKey(): // Expect a map to be set if this is a key. m, ok := v.AsMap() if !ok { - return InvalidValue, fmt.Errorf("expected a map to index %q, found %s", prefix, v.Kind()) + return InvalidValue, fmt.Errorf("expected a map to index %q, found %s", path, v.Kind()) } // Lookup current value in the map. ev, ok := m[component.key] if !ok { - return InvalidValue, noSuchKeyError{prefix} + return InvalidValue, noSuchKeyError{path} } // Recursively transform the value. - nv, err := visit(ev, prefix, suffix, opts) + nv, err := visit(ev, path, suffix, opts) if err != nil { return InvalidValue, err } @@ -100,17 +106,17 @@ func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { // Expect a sequence to be set if this is an index. s, ok := v.AsSequence() if !ok { - return InvalidValue, fmt.Errorf("expected a sequence to index %q, found %s", prefix, v.Kind()) + return InvalidValue, fmt.Errorf("expected a sequence to index %q, found %s", path, v.Kind()) } // Lookup current value in the sequence. if component.index < 0 || component.index >= len(s) { - return InvalidValue, indexOutOfBoundsError{prefix} + return InvalidValue, indexOutOfBoundsError{path} } // Recursively transform the value. ev := s[component.index] - nv, err := visit(ev, prefix, suffix, opts) + nv, err := visit(ev, path, suffix, opts) if err != nil { return InvalidValue, err } diff --git a/libs/dyn/visit_get.go b/libs/dyn/visit_get.go index 8b083fc6b..101c38aff 100644 --- a/libs/dyn/visit_get.go +++ b/libs/dyn/visit_get.go @@ -14,7 +14,7 @@ func Get(v Value, path string) (Value, error) { // If the path doesn't exist, it returns InvalidValue and an error. 
func GetByPath(v Value, p Path) (Value, error) { out := InvalidValue - _, err := visit(v, EmptyPath, p, visitOptions{ + _, err := visit(v, EmptyPath, NewPatternFromPath(p), visitOptions{ fn: func(_ Path, ev Value) (Value, error) { // Capture the value argument to return it. out = ev diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index e6053d9d1..05d17c737 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -40,7 +40,7 @@ func Foreach(fn MapFunc) MapFunc { } } -// Map applies the given function to the value at the specified path in the specified value. +// Map applies a function to the value at the given path in the given value. // It is identical to [MapByPath], except that it takes a string path instead of a [Path]. func Map(v Value, path string, fn MapFunc) (Value, error) { p, err := NewPathFromString(path) @@ -50,15 +50,21 @@ func Map(v Value, path string, fn MapFunc) (Value, error) { return MapByPath(v, p, fn) } -// Map applies the given function to the value at the specified path in the specified value. +// MapByPath applies a function to the value at the given path in the given value. +// It is identical to [MapByPattern], except that it takes a [Path] instead of a [Pattern]. +// This means it only matches a single value, not a pattern of values. +func MapByPath(v Value, p Path, fn MapFunc) (Value, error) { + return MapByPattern(v, NewPatternFromPath(p), fn) +} + +// MapByPattern applies a function to the values whose paths match the given pattern in the given value. // If successful, it returns the new value with all intermediate values copied and updated. // -// If the path contains a key that doesn't exist, or an index that is out of bounds, -// it returns the original value and no error. This is because setting a value at a path -// that doesn't exist is a no-op. +// If the pattern contains a key that doesn't exist, or an index that is out of bounds, +// it returns the original value and no error. // -// If the path is invalid for the given value, it returns InvalidValue and an error. -func MapByPath(v Value, p Path, fn MapFunc) (Value, error) { +// If the pattern is invalid for the given value, it returns InvalidValue and an error. +func MapByPattern(v Value, p Pattern, fn MapFunc) (Value, error) { nv, err := visit(v, EmptyPath, p, visitOptions{ fn: fn, }) diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index 2be996fba..f87f0a40d 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -268,3 +268,107 @@ func TestMapForeachOnOtherError(t *testing.T) { })) assert.ErrorContains(t, err, "expected a map or sequence, found int") } + +func TestMapByPatternOnNilValue(t *testing.T) { + var err error + _, err = dyn.MapByPattern(dyn.NilValue, dyn.NewPattern(dyn.AnyKey()), nil) + assert.ErrorContains(t, err, `expected a map at "", found nil`) + _, err = dyn.MapByPattern(dyn.NilValue, dyn.NewPattern(dyn.AnyIndex()), nil) + assert.ErrorContains(t, err, `expected a sequence at "", found nil`) +} + +func TestMapByPatternOnMap(t *testing.T) { + vin := dyn.V(map[string]dyn.Value{ + "a": dyn.V(map[string]dyn.Value{ + "b": dyn.V(42), + }), + "b": dyn.V(map[string]dyn.Value{ + "c": dyn.V(43), + }), + }) + + var err error + + // Expect an error if the pattern structure doesn't match the value structure. + _, err = dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyKey(), dyn.Index(0)), nil) + assert.ErrorContains(t, err, `expected a sequence to index`) + + // Apply function to pattern "*.b". 
+ vout, err := dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyKey(), dyn.Key("b")), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Key("a"), dyn.Key("b")), p) + assert.Equal(t, dyn.V(42), v) + return dyn.V(44), nil + }) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "a": map[string]any{ + "b": 44, + }, + "b": map[string]any{ + "c": 43, + }, + }, vout.AsAny()) +} + +func TestMapByPatternOnMapWithoutMatch(t *testing.T) { + vin := dyn.V(map[string]dyn.Value{ + "a": dyn.V(map[string]dyn.Value{ + "b": dyn.V(42), + }), + "b": dyn.V(map[string]dyn.Value{ + "c": dyn.V(43), + }), + }) + + // Apply function to pattern "*.zzz". + vout, err := dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyKey(), dyn.Key("zzz")), nil) + assert.NoError(t, err) + assert.Equal(t, vin, vout) +} + +func TestMapByPatternOnSequence(t *testing.T) { + vin := dyn.V([]dyn.Value{ + dyn.V([]dyn.Value{ + dyn.V(42), + }), + dyn.V([]dyn.Value{ + dyn.V(43), + dyn.V(44), + }), + }) + + var err error + + // Expect an error if the pattern structure doesn't match the value structure. + _, err = dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyIndex(), dyn.Key("a")), nil) + assert.ErrorContains(t, err, `expected a map to index`) + + // Apply function to pattern "*.c". + vout, err := dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyIndex(), dyn.Index(1)), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Index(1), dyn.Index(1)), p) + assert.Equal(t, dyn.V(44), v) + return dyn.V(45), nil + }) + assert.NoError(t, err) + assert.Equal(t, []any{ + []any{42}, + []any{43, 45}, + }, vout.AsAny()) +} + +func TestMapByPatternOnSequenceWithoutMatch(t *testing.T) { + vin := dyn.V([]dyn.Value{ + dyn.V([]dyn.Value{ + dyn.V(42), + }), + dyn.V([]dyn.Value{ + dyn.V(43), + dyn.V(44), + }), + }) + + // Apply function to pattern "*.zzz". + vout, err := dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyIndex(), dyn.Index(42)), nil) + assert.NoError(t, err) + assert.Equal(t, vin, vout) +} diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index d0361981a..b22c3da4a 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -25,10 +25,10 @@ func SetByPath(v Value, p Path, nv Value) (Value, error) { return nv, nil } - parent := p[:lp-1] component := p[lp-1] + p = p[:lp-1] - return visit(v, EmptyPath, parent, visitOptions{ + return visit(v, EmptyPath, NewPatternFromPath(p), visitOptions{ fn: func(prefix Path, v Value) (Value, error) { path := prefix.Append(component) From 22d18d2cc7c626199d2dc45f7e0461e1dcf11d21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Mar 2024 13:08:58 +0100 Subject: [PATCH 072/286] Bump golang.org/x/oauth2 from 0.17.0 to 0.18.0 (#1270) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.17.0 to 0.18.0.
Commits:
  • 85231f9 go.mod: update golang.org/x dependencies
  • 34a7afa google/externalaccount: add Config.UniverseDomain
  • 95bec95 google/externalaccount: moves externalaccount package out of internal and exp...
  • See full diff in compare view
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/oauth2&package-manager=go_modules&previous-version=0.17.0&new-version=0.18.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 1f040cda0..6d6962cfa 100644 --- a/go.mod +++ b/go.mod @@ -22,9 +22,9 @@ require ( github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.15.0 - golang.org/x/oauth2 v0.17.0 + golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 - golang.org/x/term v0.17.0 + golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 ) @@ -58,9 +58,9 @@ require ( go.opentelemetry.io/otel v1.23.0 // indirect go.opentelemetry.io/otel/metric v1.23.0 // indirect go.opentelemetry.io/otel/trace v1.23.0 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.166.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index de5f07cf7..c20521e2c 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5Ukgg golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -193,11 +193,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -218,12 +218,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From 49a87ec0ff9e48cb1f67e8d6d8a25c7f8f5dd2cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Mar 2024 14:23:08 +0100 Subject: [PATCH 073/286] Bump golang.org/x/mod from 0.15.0 to 0.16.0 (#1271) Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.15.0 to 0.16.0.
Commits:
  • 766dc5d modfile: use new go version string format in WorkFile.add error
  • See full diff in compare view
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/mod&package-manager=go_modules&previous-version=0.15.0&new-version=0.16.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6d6962cfa..832efbc66 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/mod v0.15.0 + golang.org/x/mod v0.16.0 golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 golang.org/x/term v0.18.0 diff --git a/go.sum b/go.sum index c20521e2c..932d480ab 100644 --- a/go.sum +++ b/go.sum @@ -182,8 +182,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From d5dc2bd1ca0ed056af9baaa7a7e6ce910d47856e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 11 Mar 2024 20:35:15 +0530 Subject: [PATCH 074/286] Filter current user from resource permissions (#1262) ## Changes The databricks terraform provider does not allow changing permission of the current user. Instead, the current identity is implictly set to be the owner of all resources on the platform side. This PR introduces a mutator to filter permissions from the bundle configuration at deploy time, allowing users to define permissions for their own identities in their bundle config. This would allow configurations like, allowing both alice and bob to collaborate on the same DAB: ``` permissions: level: CAN_MANAGE user_name: alice level: CAN_MANAGE user_name: bob ``` This PR is a reincarnation of https://github.com/databricks/cli/pull/1145. 
The earlier attempt had to be reverted due to metadata loss converting to and from the dynamic configuration representation (reverted here: https://github.com/databricks/cli/pull/1179) ## Tests Unit test and manually --- bundle/permissions/filter.go | 80 ++++++++++++++ bundle/permissions/filter_test.go | 174 ++++++++++++++++++++++++++++++ bundle/phases/initialize.go | 1 + 3 files changed, 255 insertions(+) create mode 100644 bundle/permissions/filter.go create mode 100644 bundle/permissions/filter_test.go diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go new file mode 100644 index 000000000..f4834a656 --- /dev/null +++ b/bundle/permissions/filter.go @@ -0,0 +1,80 @@ +package permissions + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" +) + +type filterCurrentUser struct{} + +// The databricks terraform provider does not allow changing the permissions of +// current user. The current user is implied to be the owner of all deployed resources. +// This mutator removes the current user from the permissions of all resources. +func FilterCurrentUser() bundle.Mutator { + return &filterCurrentUser{} +} + +func (m *filterCurrentUser) Name() string { + return "FilterCurrentUserFromPermissions" +} + +func filter(currentUser string) dyn.WalkValueFunc { + return func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + // Permissions are defined at top level of a resource. We can skip walking + // after a depth of 4. + // [resource_type].[resource_name].[permissions].[array_index] + // Example: pipelines.foo.permissions.0 + if len(p) > 4 { + return v, dyn.ErrSkip + } + + // We can skip walking at a depth of 3 if the key is not "permissions". + // Example: pipelines.foo.libraries + if len(p) == 3 && p[2] != dyn.Key("permissions") { + return v, dyn.ErrSkip + } + + // We want to be at the level of an individual permission to check it's + // user_name and service_principal_name fields. 
+ if len(p) != 4 || p[2] != dyn.Key("permissions") { + return v, nil + } + + // Filter if the user_name matches the current user + userName, ok := v.Get("user_name").AsString() + if ok && userName == currentUser { + return v, dyn.ErrDrop + } + + // Filter if the service_principal_name matches the current user + servicePrincipalName, ok := v.Get("service_principal_name").AsString() + if ok && servicePrincipalName == currentUser { + return v, dyn.ErrDrop + } + + return v, nil + + } +} + +func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error { + currentUser := b.Config.Workspace.CurrentUser.UserName + + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + rv, err := dyn.Get(v, "resources") + if err != nil { + return dyn.InvalidValue, err + } + + // Walk the resources and filter out the current user from the permissions + nv, err := dyn.Walk(rv, filter(currentUser)) + if err != nil { + return dyn.InvalidValue, err + } + + // Set the resources with the filtered permissions back into the bundle + return dyn.Set(v, "resources", nv) + }) +} diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go new file mode 100644 index 000000000..07f5ae77d --- /dev/null +++ b/bundle/permissions/filter_test.go @@ -0,0 +1,174 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" +) + +var alice = resources.Permission{ + Level: CAN_MANAGE, + UserName: "alice@databricks.com", +} + +var bob = resources.Permission{ + Level: CAN_VIEW, + UserName: "bob@databricks.com", +} + +var robot = resources.Permission{ + Level: CAN_RUN, + ServicePrincipalName: "i-Robot", +} + +func testFixture(userName string) *bundle.Bundle { + p := []resources.Permission{ + alice, + bob, + robot, + } + + return &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + CurrentUser: &config.User{ + User: &iam.User{ + UserName: userName, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + Permissions: p, + }, + "job2": { + Permissions: p, + }, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline1": { + Permissions: p, + }, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment1": { + Permissions: p, + }, + }, + Models: map[string]*resources.MlflowModel{ + "model1": { + Permissions: p, + }, + }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "endpoint1": { + Permissions: p, + }, + }, + RegisteredModels: map[string]*resources.RegisteredModel{ + "registered_model1": { + Grants: []resources.Grant{ + { + Principal: "abc", + }, + }, + }, + }, + }, + }, + } + +} + +func TestFilterCurrentUser(t *testing.T) { + b := testFixture("alice@databricks.com") + + err := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, err) + + // Assert current user is filtered out. 
+ assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) + + // Assert there's no change to the grant. + assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) +} + +func TestFilterCurrentServicePrincipal(t *testing.T) { + b := testFixture("i-Robot") + + err := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, err) + + // Assert current user is filtered out. + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) + + // Assert there's no change to the grant. 
+ assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) +} + +func TestFilterCurrentUserDoesNotErrorWhenNoResources(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "abc", + }, + }, + }, + }, + } + + err := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, err) +} diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 2c401c6b2..6761ffabc 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -41,6 +41,7 @@ func Initialize() bundle.Mutator { mutator.TranslatePaths(), python.WrapperWarning(), permissions.ApplyBundlePermissions(), + permissions.FilterCurrentUser(), metadata.AnnotateJobs(), terraform.Initialize(), scripts.Execute(config.ScriptPostInit), From a44c52a399b1338f540b6e3cc19e6f05c166b2ff Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 11 Mar 2024 16:22:33 +0100 Subject: [PATCH 075/286] Fixed --fail-on-active-runs in Changelog (#1275) ## Changes Fixed --fail-on-active-runs in Changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 527030992..51c601150 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -135,7 +135,7 @@ Dependency updates: Bundles: * Allow specifying executable in artifact section and skip bash from WSL ([#1169](https://github.com/databricks/cli/pull/1169)). - * Added warning when trying to deploy bundle with `--fail-if-running` and running resources ([#1163](https://github.com/databricks/cli/pull/1163)). + * Added warning when trying to deploy bundle with `--fail-on-active-runs` and running resources ([#1163](https://github.com/databricks/cli/pull/1163)). * Group bundle run flags by job and pipeline types ([#1174](https://github.com/databricks/cli/pull/1174)). * Make sure grouped flags are added to the command flag set ([#1180](https://github.com/databricks/cli/pull/1180)). * Add short_name helper function to bundle init templates ([#1167](https://github.com/databricks/cli/pull/1167)). From 4a9a12af19452887a4dc8042dbe2bd389e049ff6 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 11 Mar 2024 22:59:36 +0100 Subject: [PATCH 076/286] Retain location annotation when expanding globs for pipeline libraries (#1274) ## Changes We now keep location metadata associated with every configuration value. When expanding globs for pipeline libraries, this annotation was erased because of the conversion to/from the typed structure. This change modifies the expansion mutator to work with `dyn.Value` and retain the location of the value that holds the glob pattern. ## Tests Unit tests pass. 
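As an illustration of the approach (not part of this patch), here is a minimal sketch of how `dyn.MapByPattern` can visit every pipeline's `libraries` value while carrying each value's original location forward; the configuration tree and the rewritten string are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// A tiny configuration tree with two pipelines, each holding a glob pattern.
	vin := dyn.V(map[string]dyn.Value{
		"resources": dyn.V(map[string]dyn.Value{
			"pipelines": dyn.V(map[string]dyn.Value{
				"p1": dyn.V(map[string]dyn.Value{"libraries": dyn.V("./src/*.py")}),
				"p2": dyn.V(map[string]dyn.Value{"libraries": dyn.V("./lib/*.py")}),
			}),
		}),
	})

	// Match "resources.pipelines.<any key>.libraries".
	pattern := dyn.NewPattern(
		dyn.Key("resources"),
		dyn.Key("pipelines"),
		dyn.AnyKey(),
		dyn.Key("libraries"),
	)

	// Rewrite every match. Reusing v.Location() in the new value is what
	// retains the location annotation, so relative paths can later be
	// resolved against the file that defined them.
	vout, err := dyn.MapByPattern(vin, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
		fmt.Println("visiting", p)
		return dyn.NewValue("expanded:"+v.MustString(), v.Location()), nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(vout.AsAny())
}
```

Because the callback receives both the `dyn.Path` and the value's location, the mutator can expand globs relative to the file that defined them rather than the pipeline's configuration file directory.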
--- .../mutator/expand_pipeline_glob_paths.go | 121 ++++++++++-------- .../expand_pipeline_glob_paths_test.go | 19 ++- libs/dyn/location.go | 13 +- libs/dyn/location_test.go | 13 ++ 4 files changed, 111 insertions(+), 55 deletions(-) diff --git a/bundle/config/mutator/expand_pipeline_glob_paths.go b/bundle/config/mutator/expand_pipeline_glob_paths.go index cb1477784..843bc1271 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths.go @@ -7,7 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/cli/libs/dyn" ) type expandPipelineGlobPaths struct{} @@ -16,77 +16,94 @@ func ExpandPipelineGlobPaths() bundle.Mutator { return &expandPipelineGlobPaths{} } -func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) error { - for key, pipeline := range b.Config.Resources.Pipelines { - dir, err := pipeline.ConfigFileDirectory() +func (m *expandPipelineGlobPaths) expandLibrary(v dyn.Value) ([]dyn.Value, error) { + // Probe for the path field in the library. + for _, p := range []dyn.Path{ + dyn.NewPath(dyn.Key("notebook"), dyn.Key("path")), + dyn.NewPath(dyn.Key("file"), dyn.Key("path")), + } { + pv, err := dyn.GetByPath(v, p) + if dyn.IsNoSuchKeyError(err) { + continue + } if err != nil { - return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) + return nil, err } - expandedLibraries := make([]pipelines.PipelineLibrary, 0) - for i := 0; i < len(pipeline.Libraries); i++ { + // If the path is empty or not a local path, return the original value. + path := pv.MustString() + if path == "" || !libraries.IsLocalPath(path) { + return []dyn.Value{v}, nil + } - library := &pipeline.Libraries[i] - path := getGlobPatternToExpand(library) - if path == "" || !libraries.IsLocalPath(path) { - expandedLibraries = append(expandedLibraries, *library) - continue - } + dir, err := v.Location().Directory() + if err != nil { + return nil, err + } - matches, err := filepath.Glob(filepath.Join(dir, path)) + matches, err := filepath.Glob(filepath.Join(dir, path)) + if err != nil { + return nil, err + } + + // If there are no matches, return the original value. + if len(matches) == 0 { + return []dyn.Value{v}, nil + } + + // Emit a new value for each match. + var ev []dyn.Value + for _, match := range matches { + m, err := filepath.Rel(dir, match) if err != nil { - return err + return nil, err } - - if len(matches) == 0 { - expandedLibraries = append(expandedLibraries, *library) - continue - } - - for _, match := range matches { - m, err := filepath.Rel(dir, match) - if err != nil { - return err - } - expandedLibraries = append(expandedLibraries, cloneWithPath(library, m)) + nv, err := dyn.SetByPath(v, p, dyn.NewValue(m, pv.Location())) + if err != nil { + return nil, err } + ev = append(ev, nv) } - pipeline.Libraries = expandedLibraries + + return ev, nil } - return nil + // Neither of the library paths were found. This is likely an invalid node, + // but it isn't this mutator's job to enforce that. Return the original value. 
+ return []dyn.Value{v}, nil } -func getGlobPatternToExpand(library *pipelines.PipelineLibrary) string { - if library.File != nil { - return library.File.Path +func (m *expandPipelineGlobPaths) expandSequence(p dyn.Path, v dyn.Value) (dyn.Value, error) { + s, ok := v.AsSequence() + if !ok { + return dyn.InvalidValue, fmt.Errorf("expected sequence, got %s", v.Kind()) } - if library.Notebook != nil { - return library.Notebook.Path + var vs []dyn.Value + for _, sv := range s { + v, err := m.expandLibrary(sv) + if err != nil { + return dyn.InvalidValue, err + } + + vs = append(vs, v...) } - return "" + return dyn.NewValue(vs, v.Location()), nil } -func cloneWithPath(library *pipelines.PipelineLibrary, path string) pipelines.PipelineLibrary { - if library.File != nil { - return pipelines.PipelineLibrary{ - File: &pipelines.FileLibrary{ - Path: path, - }, - } - } +func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) error { + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + p := dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("pipelines"), + dyn.AnyKey(), + dyn.Key("libraries"), + ) - if library.Notebook != nil { - return pipelines.PipelineLibrary{ - Notebook: &pipelines.NotebookLibrary{ - Path: path, - }, - } - } - - return pipelines.PipelineLibrary{} + // Visit each pipeline's "libraries" field and expand any glob patterns. + return dyn.MapByPattern(v, p, m.expandSequence) + }) } func (*expandPipelineGlobPaths) Name() string { diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index e2cba80e2..828eac3de 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -35,6 +35,10 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "test1.py")) touchEmptyFile(t, filepath.Join(dir, "test/test2.py")) touchEmptyFile(t, filepath.Join(dir, "test/test3.py")) + touchEmptyFile(t, filepath.Join(dir, "relative/test4.py")) + touchEmptyFile(t, filepath.Join(dir, "relative/test5.py")) + touchEmptyFile(t, filepath.Join(dir, "skip/test6.py")) + touchEmptyFile(t, filepath.Join(dir, "skip/test7.py")) b := &bundle.Bundle{ Config: config.Root{ @@ -54,7 +58,13 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { }, { File: &pipelines.FileLibrary{ - Path: "./**/*.py", + Path: "./test/*.py", + }, + }, + { + // This value is annotated to be defined in the "./relative" directory. 
+ File: &pipelines.FileLibrary{ + Path: "./*.py", }, }, { @@ -96,13 +106,14 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { } bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml")) m := ExpandPipelineGlobPaths() err := bundle.Apply(context.Background(), b, m) require.NoError(t, err) libraries := b.Config.Resources.Pipelines["pipeline"].Libraries - require.Len(t, libraries, 11) + require.Len(t, libraries, 13) // Making sure glob patterns are expanded correctly require.True(t, containsNotebook(libraries, filepath.Join("test", "test2.ipynb"))) @@ -110,6 +121,10 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { require.True(t, containsFile(libraries, filepath.Join("test", "test2.py"))) require.True(t, containsFile(libraries, filepath.Join("test", "test3.py"))) + // These patterns are defined relative to "./relative" + require.True(t, containsFile(libraries, "test4.py")) + require.True(t, containsFile(libraries, "test5.py")) + // Making sure exact file references work as well require.True(t, containsNotebook(libraries, "test1.ipynb")) diff --git a/libs/dyn/location.go b/libs/dyn/location.go index cd369193e..961d2f121 100644 --- a/libs/dyn/location.go +++ b/libs/dyn/location.go @@ -1,6 +1,9 @@ package dyn -import "fmt" +import ( + "fmt" + "path/filepath" +) type Location struct { File string @@ -11,3 +14,11 @@ type Location struct { func (l Location) String() string { return fmt.Sprintf("%s:%d:%d", l.File, l.Line, l.Column) } + +func (l Location) Directory() (string, error) { + if l.File == "" { + return "", fmt.Errorf("no file in location") + } + + return filepath.Dir(l.File), nil +} diff --git a/libs/dyn/location_test.go b/libs/dyn/location_test.go index 29226d73d..6d856410b 100644 --- a/libs/dyn/location_test.go +++ b/libs/dyn/location_test.go @@ -11,3 +11,16 @@ func TestLocation(t *testing.T) { loc := dyn.Location{File: "file", Line: 1, Column: 2} assert.Equal(t, "file:1:2", loc.String()) } + +func TestLocationDirectory(t *testing.T) { + loc := dyn.Location{File: "file", Line: 1, Column: 2} + dir, err := loc.Directory() + assert.NoError(t, err) + assert.Equal(t, ".", dir) +} + +func TestLocationDirectoryNoFile(t *testing.T) { + loc := dyn.Location{} + _, err := loc.Directory() + assert.Error(t, err) +} From 945d522dab796265cc9bb71b3692316f21ec02b5 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Mon, 11 Mar 2024 23:24:23 +0100 Subject: [PATCH 077/286] Propagate correct `User-Agent` for CLI (#1264) ## Changes This PR migrates `databricks auth login` HTTP client to the one from Go SDK, making API calls more robust and containing our unified user agent. 
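For reference, a minimal sketch of the new call shape (simplified from the diff below; `fetchEndpoints`, `oidcURL`, and the inline `oauthAuthorizationServer` type are illustrative stand-ins for the real declarations in `libs/auth`, not the actual code):

```go
package auth

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go/httpclient"
)

// Stand-in for the unexported struct in libs/auth/oauth.go.
type oauthAuthorizationServer struct {
	AuthorizationEndpoint string `json:"authorization_endpoint"`
	TokenEndpoint         string `json:"token_endpoint"`
}

func fetchEndpoints(ctx context.Context, oidcURL string) (*oauthAuthorizationServer, error) {
	// The SDK client replaces net/http.DefaultClient, so every outgoing
	// request carries the unified User-Agent.
	client := httpclient.NewApiClient(httpclient.ClientConfig{})

	// GET the OIDC discovery document and unmarshal the JSON response in one call.
	var endpoints oauthAuthorizationServer
	err := client.Do(ctx, "GET", oidcURL, httpclient.WithResponseUnmarshal(&endpoints))
	if err != nil {
		return nil, fmt.Errorf("fetch .well-known: %w", err)
	}
	return &endpoints, nil
}
```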
## Tests Unit tests left almost unchanged --- libs/auth/oauth.go | 47 ++++++++++++++++------------------------- libs/auth/oauth_test.go | 33 ++++++++++++----------------- 2 files changed, 32 insertions(+), 48 deletions(-) diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index dd27d04b2..4ce0d4def 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -6,16 +6,14 @@ import ( "crypto/sha256" _ "embed" "encoding/base64" - "encoding/json" "errors" "fmt" - "io" "net" - "net/http" "strings" "time" "github.com/databricks/cli/libs/auth/cache" + "github.com/databricks/databricks-sdk-go/httpclient" "github.com/databricks/databricks-sdk-go/retries" "github.com/pkg/browser" "golang.org/x/oauth2" @@ -43,16 +41,12 @@ type PersistentAuth struct { Host string AccountID string - http httpGet + http *httpclient.ApiClient cache tokenCache ln net.Listener browser func(string) error } -type httpGet interface { - Get(string) (*http.Response, error) -} - type tokenCache interface { Store(key string, t *oauth2.Token) error Lookup(key string) (*oauth2.Token, error) @@ -77,10 +71,12 @@ func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) { } // OAuth2 config is invoked only for expired tokens to speed up // the happy path in the token retrieval - cfg, err := a.oauth2Config() + cfg, err := a.oauth2Config(ctx) if err != nil { return nil, err } + // make OAuth2 library use our client + ctx = a.http.InContextForOAuth2(ctx) // eagerly refresh token refreshed, err := cfg.TokenSource(ctx, t).Token() if err != nil { @@ -110,7 +106,7 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error { if err != nil { return fmt.Errorf("init: %w", err) } - cfg, err := a.oauth2Config() + cfg, err := a.oauth2Config(ctx) if err != nil { return err } @@ -120,6 +116,8 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error { } defer cb.Close() state, pkce := a.stateAndPKCE() + // make OAuth2 library use our client + ctx = a.http.InContextForOAuth2(ctx) ts := authhandler.TokenSourceWithPKCE(ctx, cfg, state, cb.Handler, pkce) t, err := ts.Token() if err != nil { @@ -138,7 +136,9 @@ func (a *PersistentAuth) init(ctx context.Context) error { return ErrFetchCredentials } if a.http == nil { - a.http = http.DefaultClient + a.http = httpclient.NewApiClient(httpclient.ClientConfig{ + // noop + }) } if a.cache == nil { a.cache = &cache.TokenCache{} @@ -172,7 +172,7 @@ func (a *PersistentAuth) Close() error { return a.ln.Close() } -func (a *PersistentAuth) oidcEndpoints() (*oauthAuthorizationServer, error) { +func (a *PersistentAuth) oidcEndpoints(ctx context.Context) (*oauthAuthorizationServer, error) { prefix := a.key() if a.AccountID != "" { return &oauthAuthorizationServer{ @@ -180,31 +180,20 @@ func (a *PersistentAuth) oidcEndpoints() (*oauthAuthorizationServer, error) { TokenEndpoint: fmt.Sprintf("%s/v1/token", prefix), }, nil } + var oauthEndpoints oauthAuthorizationServer oidc := fmt.Sprintf("%s/oidc/.well-known/oauth-authorization-server", prefix) - oidcResponse, err := a.http.Get(oidc) + err := a.http.Do(ctx, "GET", oidc, httpclient.WithResponseUnmarshal(&oauthEndpoints)) if err != nil { return nil, fmt.Errorf("fetch .well-known: %w", err) } - if oidcResponse.StatusCode != 200 { + var httpErr *httpclient.HttpError + if errors.As(err, &httpErr) && httpErr.StatusCode != 200 { return nil, ErrOAuthNotSupported } - if oidcResponse.Body == nil { - return nil, fmt.Errorf("fetch .well-known: empty body") - } - defer oidcResponse.Body.Close() - raw, err := io.ReadAll(oidcResponse.Body) - if err != nil { 
- return nil, fmt.Errorf("read .well-known: %w", err) - } - var oauthEndpoints oauthAuthorizationServer - err = json.Unmarshal(raw, &oauthEndpoints) - if err != nil { - return nil, fmt.Errorf("parse .well-known: %w", err) - } return &oauthEndpoints, nil } -func (a *PersistentAuth) oauth2Config() (*oauth2.Config, error) { +func (a *PersistentAuth) oauth2Config(ctx context.Context) (*oauth2.Config, error) { // in this iteration of CLI, we're using all scopes by default, // because tools like CLI and Terraform do use all apis. This // decision may be reconsidered later, once we have a proper @@ -213,7 +202,7 @@ func (a *PersistentAuth) oauth2Config() (*oauth2.Config, error) { "offline_access", "all-apis", } - endpoints, err := a.oidcEndpoints() + endpoints, err := a.oidcEndpoints(ctx) if err != nil { return nil, fmt.Errorf("oidc: %w", err) } diff --git a/libs/auth/oauth_test.go b/libs/auth/oauth_test.go index 9b5aa9ac9..ea6a8061e 100644 --- a/libs/auth/oauth_test.go +++ b/libs/auth/oauth_test.go @@ -5,14 +5,14 @@ import ( "crypto/tls" _ "embed" "fmt" - "io" "net/http" "net/url" - "strings" "testing" "time" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/httpclient/fixtures" "github.com/databricks/databricks-sdk-go/qa" "github.com/stretchr/testify/assert" "golang.org/x/oauth2" @@ -24,34 +24,29 @@ func TestOidcEndpointsForAccounts(t *testing.T) { AccountID: "xyz", } defer p.Close() - s, err := p.oidcEndpoints() + s, err := p.oidcEndpoints(context.Background()) assert.NoError(t, err) assert.Equal(t, "https://abc/oidc/accounts/xyz/v1/authorize", s.AuthorizationEndpoint) assert.Equal(t, "https://abc/oidc/accounts/xyz/v1/token", s.TokenEndpoint) } -type mockGet func(url string) (*http.Response, error) - -func (m mockGet) Get(url string) (*http.Response, error) { - return m(url) -} - func TestOidcForWorkspace(t *testing.T) { p := &PersistentAuth{ Host: "abc", - http: mockGet(func(url string) (*http.Response, error) { - assert.Equal(t, "https://abc/oidc/.well-known/oauth-authorization-server", url) - return &http.Response{ - StatusCode: 200, - Body: io.NopCloser(strings.NewReader(`{ - "authorization_endpoint": "a", - "token_endpoint": "b" - }`)), - }, nil + http: httpclient.NewApiClient(httpclient.ClientConfig{ + Transport: fixtures.MappingTransport{ + "GET /oidc/.well-known/oauth-authorization-server": { + Status: 200, + Response: map[string]string{ + "authorization_endpoint": "a", + "token_endpoint": "b", + }, + }, + }, }), } defer p.Close() - endpoints, err := p.oidcEndpoints() + endpoints, err := p.oidcEndpoints(context.Background()) assert.NoError(t, err) assert.Equal(t, "a", endpoints.AuthorizationEndpoint) assert.Equal(t, "b", endpoints.TokenEndpoint) From c7818560ca1c60a3324c190fa2b24e29c5acf8c2 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 12 Mar 2024 15:12:34 +0100 Subject: [PATCH 078/286] Add usage string when command fails with incorrect arguments (#1276) ## Changes Add usage string when command fails with incorrect arguments Fixes #1119 ## Tests Example output ``` > databricks libraries cluster-status Error: accepts 1 arg(s), received 0 Usage: databricks libraries cluster-status CLUSTER_ID [flags] Flags: -h, --help help for cluster-status Global Flags: --debug enable debug logging -o, --output type output type: text or json (default text) -p, --profile string ~/.databrickscfg profile -t, --target string bundle target to use (if applicable) ``` --- .codegen/service.go.tmpl | 4 +- 
bundle/schema/docs/bundle_descriptions.json | 24 ++--- cmd/account/access-control/access-control.go | 4 +- cmd/account/billable-usage/billable-usage.go | 2 +- cmd/account/budgets/budgets.go | 2 +- .../csp-enablement-account.go | 2 +- .../custom-app-integration.go | 6 +- .../encryption-keys/encryption-keys.go | 4 +- .../esm-enablement-account.go | 2 +- cmd/account/groups/groups.go | 4 +- .../ip-access-lists/ip-access-lists.go | 8 +- cmd/account/log-delivery/log-delivery.go | 8 +- .../metastore-assignments.go | 10 +- cmd/account/metastores/metastores.go | 8 +- .../network-connectivity.go | 20 ++-- cmd/account/networks/networks.go | 4 +- .../o-auth-published-apps.go | 2 +- .../personal-compute/personal-compute.go | 4 +- cmd/account/private-access/private-access.go | 8 +- .../published-app-integration.go | 8 +- .../service-principal-secrets.go | 6 +- .../service-principals/service-principals.go | 4 +- cmd/account/settings/settings.go | 3 + .../storage-credentials.go | 10 +- cmd/account/users/users.go | 4 +- cmd/account/vpc-endpoints/vpc-endpoints.go | 4 +- .../workspace-assignment.go | 8 +- cmd/account/workspaces/workspaces.go | 4 +- cmd/api/api.go | 3 +- cmd/bundle/deploy.go | 3 +- cmd/bundle/deployment/bind.go | 3 +- cmd/bundle/deployment/unbind.go | 3 +- cmd/bundle/destroy.go | 3 +- cmd/bundle/init.go | 2 +- cmd/bundle/launch.go | 2 +- cmd/bundle/run.go | 2 +- cmd/bundle/schema.go | 3 +- cmd/bundle/summary.go | 2 +- cmd/bundle/sync.go | 3 +- cmd/bundle/validate.go | 3 +- cmd/fs/cat.go | 2 +- cmd/fs/cp.go | 2 +- cmd/fs/ls.go | 2 +- cmd/fs/mkdir.go | 2 +- cmd/fs/rm.go | 2 +- cmd/labs/install.go | 3 +- cmd/labs/show.go | 3 +- cmd/labs/uninstall.go | 3 +- cmd/labs/upgrade.go | 3 +- cmd/root/args.go | 45 ++++++++ cmd/sync/sync.go | 2 +- cmd/version/version.go | 3 +- cmd/workspace/alerts/alerts.go | 2 +- cmd/workspace/apps/apps.go | 8 +- .../artifact-allowlists.go | 4 +- .../automatic-cluster-update.go | 2 +- cmd/workspace/catalogs/catalogs.go | 10 +- cmd/workspace/clean-rooms/clean-rooms.go | 8 +- .../cluster-policies/cluster-policies.go | 12 +-- cmd/workspace/clusters/clusters.go | 30 +++--- cmd/workspace/connections/connections.go | 2 +- .../csp-enablement/csp-enablement.go | 2 +- .../dashboard-widgets/dashboard-widgets.go | 4 +- cmd/workspace/dashboards/dashboards.go | 2 +- .../default-namespace/default-namespace.go | 4 +- .../esm-enablement/esm-enablement.go | 2 +- cmd/workspace/experiments/experiments.go | 86 +++++++-------- .../external-locations/external-locations.go | 12 +-- cmd/workspace/functions/functions.go | 2 +- .../git-credentials/git-credentials.go | 4 +- .../global-init-scripts.go | 8 +- cmd/workspace/grants/grants.go | 6 +- cmd/workspace/groups/groups.go | 4 +- .../instance-pools/instance-pools.go | 10 +- .../instance-profiles/instance-profiles.go | 12 +-- .../ip-access-lists/ip-access-lists.go | 8 +- cmd/workspace/jobs/jobs.go | 20 ++-- .../lakehouse-monitors/lakehouse-monitors.go | 24 ++--- cmd/workspace/lakeview/lakeview.go | 2 +- cmd/workspace/libraries/libraries.go | 2 +- cmd/workspace/metastores/metastores.go | 10 +- .../model-registry/model-registry.go | 102 +++++++++--------- .../model-versions/model-versions.go | 10 +- cmd/workspace/online-tables/online-tables.go | 6 +- .../permission-migration.go | 4 +- cmd/workspace/permissions/permissions.go | 8 +- cmd/workspace/pipelines/pipelines.go | 4 +- .../policy-families/policy-families.go | 4 +- cmd/workspace/providers/providers.go | 6 +- cmd/workspace/queries/queries.go | 2 +- cmd/workspace/query-history/query-history.go | 
2 +- .../query-visualizations.go | 4 +- .../recipient-activation.go | 4 +- cmd/workspace/recipients/recipients.go | 10 +- .../registered-models/registered-models.go | 12 +-- cmd/workspace/repos/overrides.go | 2 +- cmd/workspace/repos/repos.go | 6 +- .../restrict-workspace-admins.go | 4 +- cmd/workspace/schemas/schemas.go | 6 +- cmd/workspace/secrets/put_secret.go | 4 +- cmd/workspace/secrets/secrets.go | 28 ++--- .../service-principals/service-principals.go | 4 +- .../serving-endpoints/serving-endpoints.go | 26 ++--- cmd/workspace/settings/settings.go | 3 + cmd/workspace/shares/shares.go | 14 +-- .../storage-credentials.go | 14 +-- .../system-schemas/system-schemas.go | 6 +- .../table-constraints/table-constraints.go | 2 +- cmd/workspace/tables/tables.go | 2 +- .../token-management/token-management.go | 8 +- cmd/workspace/tokens/tokens.go | 4 +- cmd/workspace/users/users.go | 8 +- .../vector-search-endpoints.go | 10 +- .../vector-search-indexes.go | 20 ++-- cmd/workspace/volumes/volumes.go | 6 +- cmd/workspace/warehouses/warehouses.go | 6 +- .../workspace-bindings/workspace-bindings.go | 8 +- .../workspace-conf/workspace-conf.go | 2 +- cmd/workspace/workspace/export_dir.go | 2 +- cmd/workspace/workspace/import_dir.go | 2 +- cmd/workspace/workspace/workspace.go | 20 ++-- internal/clusters_test.go | 2 +- internal/secrets_test.go | 2 +- internal/workspace_test.go | 4 +- 124 files changed, 525 insertions(+), 461 deletions(-) create mode 100644 cmd/root/args.go diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 0665b661f..4887a6230 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -194,7 +194,7 @@ func new{{.PascalName}}() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { {{- if $hasDifferentArgsWithJsonFlag }} if cmd.Flags().Changed("json") { - err := cobra.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args) + err := root.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args) if err != nil { {{- if eq 0 (len .Request.RequiredPathFields) }} return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") @@ -206,7 +206,7 @@ func new{{.PascalName}}() *cobra.Command { } {{- end }} {{- if $hasRequiredArgs }} - check := cobra.ExactArgs({{len .RequiredPositionalArguments}}) + check := root.ExactArgs({{len .RequiredPositionalArguments}}) return check(cmd, args) {{- else}} return nil diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 494c8c752..d107af73b 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -322,7 +322,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -785,7 +785,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the Python file. 
When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -930,7 +930,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -1269,7 +1269,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -1371,7 +1371,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. 
When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -1449,7 +1449,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -2855,7 +2855,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -3318,7 +3318,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. 
When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3463,7 +3463,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -3802,7 +3802,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -3904,7 +3904,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. 
When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, @@ -3982,7 +3982,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" } } }, diff --git a/cmd/account/access-control/access-control.go b/cmd/account/access-control/access-control.go index 76ad4b51f..f6761a1b4 100755 --- a/cmd/account/access-control/access-control.go +++ b/cmd/account/access-control/access-control.go @@ -72,7 +72,7 @@ func newGetAssignableRolesForResource() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -140,7 +140,7 @@ func newGetRuleSet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/account/billable-usage/billable-usage.go b/cmd/account/billable-usage/billable-usage.go index d8d36bacc..2a2cca605 100755 --- a/cmd/account/billable-usage/billable-usage.go +++ b/cmd/account/billable-usage/billable-usage.go @@ -78,7 +78,7 @@ func newDownload() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index e6f87a953..82f7b9f01 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -317,7 +317,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/csp-enablement-account/csp-enablement-account.go b/cmd/account/csp-enablement-account/csp-enablement-account.go index ca2170fad..5c7b9b926 100755 --- a/cmd/account/csp-enablement-account/csp-enablement-account.go +++ b/cmd/account/csp-enablement-account/csp-enablement-account.go @@ -68,7 +68,7 @@ func newGet() *cobra.Command { cmd.Annotations = 
make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index 79dd50c1f..ca9f69a35 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -139,7 +139,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -197,7 +197,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -301,7 +301,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/encryption-keys/encryption-keys.go b/cmd/account/encryption-keys/encryption-keys.go index c82f385ed..44545ccfa 100755 --- a/cmd/account/encryption-keys/encryption-keys.go +++ b/cmd/account/encryption-keys/encryption-keys.go @@ -163,7 +163,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -235,7 +235,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go index fc793d60a..0c936c4de 100755 --- a/cmd/account/esm-enablement-account/esm-enablement-account.go +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -66,7 +66,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index 68ae1b2af..a7e1ac430 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -87,7 +87,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -295,7 +295,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/account/ip-access-lists/ip-access-lists.go b/cmd/account/ip-access-lists/ip-access-lists.go index 364c5a919..5c6d27dd7 100755 --- a/cmd/account/ip-access-lists/ip-access-lists.go +++ b/cmd/account/ip-access-lists/ip-access-lists.go @@ -116,13 +116,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := 
root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'label', 'list_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -395,13 +395,13 @@ func newReplace() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only IP_ACCESS_LIST_ID as positional arguments. Provide 'label', 'list_type', 'enabled' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index f51573e9f..4584f4d2b 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -152,7 +152,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -288,7 +288,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -353,13 +353,13 @@ func newPatchStatus() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only LOG_DELIVERY_CONFIGURATION_ID as positional arguments. 
Provide 'status' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index 013d25cff..d7f32ccb9 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -75,7 +75,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -145,7 +145,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -210,7 +210,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -272,7 +272,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -333,7 +333,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index bcccff812..7c8e3f2c1 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -70,7 +70,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -135,7 +135,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -193,7 +193,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -294,7 +294,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index fbde0694e..2b6cf54a1 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -80,13 +80,13 @@ func newCreateNetworkConnectivityConfiguration() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'region' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -171,13 +171,13 @@ func newCreatePrivateEndpointRule() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only NETWORK_CONNECTIVITY_CONFIG_ID as positional arguments. Provide 'resource_id', 'group_id' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -250,7 +250,7 @@ func newDeleteNetworkConnectivityConfiguration() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -313,7 +313,7 @@ func newDeletePrivateEndpointRule() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -372,7 +372,7 @@ func newGetNetworkConnectivityConfiguration() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -431,7 +431,7 @@ func newGetPrivateEndpointRule() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -489,7 +489,7 @@ func newListNetworkConnectivityConfigurations() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -544,7 +544,7 @@ func newListPrivateEndpointRules() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/networks/networks.go b/cmd/account/networks/networks.go index 6dc772973..05ef0c815 100755 --- a/cmd/account/networks/networks.go +++ b/cmd/account/networks/networks.go @@ -81,13 +81,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'network_name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/o-auth-published-apps/o-auth-published-apps.go b/cmd/account/o-auth-published-apps/o-auth-published-apps.go index a9e94e5aa..6573b0529 100755 --- a/cmd/account/o-auth-published-apps/o-auth-published-apps.go +++ b/cmd/account/o-auth-published-apps/o-auth-published-apps.go @@ -66,7 +66,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/account/personal-compute/personal-compute.go b/cmd/account/personal-compute/personal-compute.go index 79090faf2..7a2a04525 100755 --- a/cmd/account/personal-compute/personal-compute.go +++ b/cmd/account/personal-compute/personal-compute.go @@ -74,7 +74,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -129,7 +129,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 4641223c8..d527fa64e 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -93,13 +93,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'private_access_settings_name', 'region' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -395,13 +395,13 @@ func newReplace() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only PRIVATE_ACCESS_SETTINGS_ID as positional arguments. 
Provide 'private_access_settings_name', 'region' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index 8befd39ba..32fed5cd0 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -75,7 +75,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -139,7 +139,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -197,7 +197,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -300,7 +300,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index 0239df664..47cfa4b08 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -79,7 +79,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -141,7 +141,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -205,7 +205,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index b9ad194cf..c86810f1d 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -85,7 +85,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -294,7 +294,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/account/settings/settings.go b/cmd/account/settings/settings.go index a750e81e0..29bb6ad15 100755 --- a/cmd/account/settings/settings.go +++ b/cmd/account/settings/settings.go @@ -23,6 +23,9 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "settings", }, + + // 
This service is being previewed; hide from help output. + Hidden: true, } // Add subservices diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index 61f8521bc..0a20b86b6 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -78,7 +78,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -146,7 +146,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -208,7 +208,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -268,7 +268,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -333,7 +333,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index ab4bd95bb..289d2972f 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -93,7 +93,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -310,7 +310,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/account/vpc-endpoints/vpc-endpoints.go b/cmd/account/vpc-endpoints/vpc-endpoints.go index 0c15ca9c6..e6c6c126a 100755 --- a/cmd/account/vpc-endpoints/vpc-endpoints.go +++ b/cmd/account/vpc-endpoints/vpc-endpoints.go @@ -88,13 +88,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'vpc_endpoint_name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index 20f885249..935d64f05 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -72,7 +72,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -137,7 +137,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -199,7 +199,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -261,7 +261,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 2cc0cb1a7..1ec6230b2 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -117,13 +117,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'workspace_name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/api/api.go b/cmd/api/api.go index 11a5e3e36..03460f717 100644 --- a/cmd/api/api.go +++ b/cmd/api/api.go @@ -5,6 +5,7 @@ import ( "net/http" "strings" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/client" @@ -35,7 +36,7 @@ func makeCommand(method string) *cobra.Command { command := &cobra.Command{ Use: strings.ToLower(method), - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), Short: fmt.Sprintf("Perform %s request", method), RunE: func(cmd *cobra.Command, args []string) error { var path = args[0] diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 60426ecad..0ba8a187a 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) @@ -13,7 +14,7 @@ func newDeployCommand() *cobra.Command { cmd := &cobra.Command{ Use: "deploy", Short: "Deploy bundle", - Args: cobra.NoArgs, + Args: root.NoArgs, PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/deployment/bind.go b/cmd/bundle/deployment/bind.go index 1287eb044..184cac1d1 100644 --- a/cmd/bundle/deployment/bind.go +++ b/cmd/bundle/deployment/bind.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/spf13/cobra" ) @@ -16,7 +17,7 @@ func newBindCommand() *cobra.Command { cmd := &cobra.Command{ Use: "bind KEY RESOURCE_ID", Short: "Bind bundle-defined resources to existing resources", - Args: cobra.ExactArgs(2), + Args: root.ExactArgs(2), PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/deployment/unbind.go b/cmd/bundle/deployment/unbind.go index 9f0e4f7c7..b5fb69200 100644 --- a/cmd/bundle/deployment/unbind.go +++ b/cmd/bundle/deployment/unbind.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) @@ -13,7 +14,7 @@ func newUnbindCommand() *cobra.Command { cmd := &cobra.Command{ Use: "unbind KEY", Short: "Unbind bundle-defined resources from its managed remote resource", - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index b27161f98..dc5ea45f8 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" @@ -18,7 +19,7 @@ func newDestroyCommand() *cobra.Command { cmd := &cobra.Command{ Use: "destroy", Short: "Destroy deployed bundle resources", - Args: cobra.NoArgs, + Args: root.NoArgs, PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 704bad64d..6845ab672 100644 --- a/cmd/bundle/init.go +++ 
b/cmd/bundle/init.go @@ -128,7 +128,7 @@ func newInitCommand() *cobra.Command { cmd := &cobra.Command{ Use: "init [TEMPLATE_PATH]", Short: "Initialize using a bundle template", - Args: cobra.MaximumNArgs(1), + Args: root.MaximumNArgs(1), Long: fmt.Sprintf(`Initialize using a bundle template. TEMPLATE_PATH optionally specifies which template to use. It can be one of the following: diff --git a/cmd/bundle/launch.go b/cmd/bundle/launch.go index bbb43600a..f376ebdae 100644 --- a/cmd/bundle/launch.go +++ b/cmd/bundle/launch.go @@ -12,7 +12,7 @@ func newLaunchCommand() *cobra.Command { Use: "launch", Short: "Launches a notebook on development cluster", Long: `Reads a file and executes it on dev cluster`, - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), // We're not ready to expose this command until we specify its semantics. Hidden: true, diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 8814bee0b..9b4ad5c8d 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -19,7 +19,7 @@ func newRunCommand() *cobra.Command { cmd := &cobra.Command{ Use: "run [flags] KEY", Short: "Run a resource (e.g. a job or a pipeline)", - Args: cobra.MaximumNArgs(1), + Args: root.MaximumNArgs(1), PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/schema.go b/cmd/bundle/schema.go index eb0c1fc9e..0f27142bd 100644 --- a/cmd/bundle/schema.go +++ b/cmd/bundle/schema.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/schema" + "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) @@ -13,7 +14,7 @@ func newSchemaCommand() *cobra.Command { cmd := &cobra.Command{ Use: "schema", Short: "Generate JSON Schema for bundle configuration", - Args: cobra.NoArgs, + Args: root.NoArgs, } cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index 8b475661f..68354a0a2 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -20,7 +20,7 @@ func newSummaryCommand() *cobra.Command { cmd := &cobra.Command{ Use: "summary", Short: "Describe the bundle resources and their deployment states", - Args: cobra.NoArgs, + Args: root.NoArgs, PreRunE: utils.ConfigureBundleWithVariables, // This command is currently intended for the Databricks VSCode extension only diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index d9f8582c2..20ec2fcd3 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/sync" "github.com/spf13/cobra" @@ -47,7 +48,7 @@ func newSyncCommand() *cobra.Command { cmd := &cobra.Command{ Use: "sync [flags]", Short: "Synchronize bundle tree to the workspace", - Args: cobra.NoArgs, + Args: root.NoArgs, PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 9a5bf1e9a..a650fcfde 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/log" "github.com/spf13/cobra" ) @@ -14,7 +15,7 @@ func newValidateCommand() *cobra.Command { cmd := &cobra.Command{ Use: "validate", Short: "Validate configuration", - Args: cobra.NoArgs, + Args: root.NoArgs, 
PreRunE: utils.ConfigureBundleWithVariables, } diff --git a/cmd/fs/cat.go b/cmd/fs/cat.go index df94d1d73..7a6f42cba 100644 --- a/cmd/fs/cat.go +++ b/cmd/fs/cat.go @@ -11,7 +11,7 @@ func newCatCommand() *cobra.Command { Use: "cat FILE_PATH", Short: "Show file content.", Long: `Show the contents of a file in DBFS or a UC Volume.`, - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/fs/cp.go b/cmd/fs/cp.go index 1ba0daf0c..52feb8905 100644 --- a/cmd/fs/cp.go +++ b/cmd/fs/cp.go @@ -141,7 +141,7 @@ func newCpCommand() *cobra.Command { When copying a file, if TARGET_PATH is a directory, the file will be created inside the directory, otherwise the file is created at TARGET_PATH. `, - Args: cobra.ExactArgs(2), + Args: root.ExactArgs(2), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/fs/ls.go b/cmd/fs/ls.go index 1d9ee876a..cec9b98ba 100644 --- a/cmd/fs/ls.go +++ b/cmd/fs/ls.go @@ -42,7 +42,7 @@ func newLsCommand() *cobra.Command { Use: "ls DIR_PATH", Short: "Lists files.", Long: `Lists files in DBFS and UC Volumes.`, - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/fs/mkdir.go b/cmd/fs/mkdir.go index dc054d8a7..074a7543d 100644 --- a/cmd/fs/mkdir.go +++ b/cmd/fs/mkdir.go @@ -13,7 +13,7 @@ func newMkdirCommand() *cobra.Command { Aliases: []string{"mkdirs"}, Short: "Make directories.", Long: `Make directories in DBFS and UC Volumes. Mkdir will create directories along the path to the argument directory.`, - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/fs/rm.go b/cmd/fs/rm.go index 8a7b6571d..5f2904e71 100644 --- a/cmd/fs/rm.go +++ b/cmd/fs/rm.go @@ -11,7 +11,7 @@ func newRmCommand() *cobra.Command { Use: "rm PATH", Short: "Remove files and directories.", Long: `Remove files and directories from DBFS and UC Volumes.`, - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/labs/install.go b/cmd/labs/install.go index 31db43892..6ed6b2e91 100644 --- a/cmd/labs/install.go +++ b/cmd/labs/install.go @@ -2,13 +2,14 @@ package labs import ( "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) func newInstallCommand() *cobra.Command { return &cobra.Command{ Use: "install NAME", - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), Short: "Installs project", RunE: func(cmd *cobra.Command, args []string) error { inst, err := project.NewInstaller(cmd, args[0]) diff --git a/cmd/labs/show.go b/cmd/labs/show.go index 1ae6498c8..c36f0bda3 100644 --- a/cmd/labs/show.go +++ b/cmd/labs/show.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/spf13/cobra" ) @@ -11,7 +12,7 @@ import ( func newShowCommand() *cobra.Command { return &cobra.Command{ Use: "show NAME", - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), Short: "Shows information about the project", Annotations: map[string]string{ "template": cmdio.Heredoc(` diff --git a/cmd/labs/uninstall.go b/cmd/labs/uninstall.go index b2c83fff7..424df38db 100644 --- a/cmd/labs/uninstall.go +++ b/cmd/labs/uninstall.go @@ -4,13 +4,14 @@ import ( "fmt" "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) func newUninstallCommand() *cobra.Command { return &cobra.Command{ Use: "uninstall NAME", - Args: 
cobra.ExactArgs(1),
+		Args:  root.ExactArgs(1),
 		Short: "Uninstalls project",
 		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
 			var names []string
diff --git a/cmd/labs/upgrade.go b/cmd/labs/upgrade.go
index 88b7bc928..d0a8dec9b 100644
--- a/cmd/labs/upgrade.go
+++ b/cmd/labs/upgrade.go
@@ -2,13 +2,14 @@ package labs
 
 import (
 	"github.com/databricks/cli/cmd/labs/project"
+	"github.com/databricks/cli/cmd/root"
 	"github.com/spf13/cobra"
 )
 
 func newUpgradeCommand() *cobra.Command {
 	return &cobra.Command{
 		Use:   "upgrade NAME",
-		Args:  cobra.ExactArgs(1),
+		Args:  root.ExactArgs(1),
 		Short: "Upgrades project",
 		RunE: func(cmd *cobra.Command, args []string) error {
 			inst, err := project.NewUpgrader(cmd, args[0])
diff --git a/cmd/root/args.go b/cmd/root/args.go
new file mode 100644
index 000000000..800d6add7
--- /dev/null
+++ b/cmd/root/args.go
@@ -0,0 +1,45 @@
+package root
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+type InvalidArgsError struct {
+	// The command that was run.
+	Command *cobra.Command
+	// The error message.
+	Message string
+}
+
+func (e *InvalidArgsError) Error() string {
+	return fmt.Sprintf("%s\n\n%s", e.Message, e.Command.UsageString())
+}
+
+func ExactArgs(n int) cobra.PositionalArgs {
+	return func(cmd *cobra.Command, args []string) error {
+		if len(args) != n {
+			return &InvalidArgsError{Message: fmt.Sprintf("accepts %d arg(s), received %d", n, len(args)), Command: cmd}
+		}
+		return nil
+	}
+}
+
+func NoArgs(cmd *cobra.Command, args []string) error {
+	if len(args) > 0 {
+		msg := fmt.Sprintf("unknown command %q for %q", args[0], cmd.CommandPath())
+		return &InvalidArgsError{Message: msg, Command: cmd}
+	}
+	return nil
+}
+
+func MaximumNArgs(n int) cobra.PositionalArgs {
+	return func(cmd *cobra.Command, args []string) error {
+		if len(args) > n {
+			msg := fmt.Sprintf("accepts at most %d arg(s), received %d", n, len(args))
+			return &InvalidArgsError{Message: msg, Command: cmd}
+		}
+		return nil
+	}
+}
diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go
index c613e8ca1..f08d3d61a 100644
--- a/cmd/sync/sync.go
+++ b/cmd/sync/sync.go
@@ -78,7 +78,7 @@ func New() *cobra.Command {
 	cmd := &cobra.Command{
 		Use:     "sync [flags] SRC DST",
 		Short:   "Synchronize a local directory to a workspace directory",
-		Args:    cobra.MaximumNArgs(2),
+		Args:    root.MaximumNArgs(2),
 		GroupID: "development",
 	}
 
diff --git a/cmd/version/version.go b/cmd/version/version.go
index 653fbb897..98881b910 100644
--- a/cmd/version/version.go
+++ b/cmd/version/version.go
@@ -1,6 +1,7 @@
 package version
 
 import (
+	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/internal/build"
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/spf13/cobra"
@@ -9,7 +10,7 @@ import (
 func New() *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "version",
-		Args:  cobra.NoArgs,
+		Args:  root.NoArgs,
 		Short: "Retrieve information about the current version of this CLI",
 		Annotations: map[string]string{
 			"template": "Databricks CLI v{{.Version}}\n",
diff --git a/cmd/workspace/alerts/alerts.go b/cmd/workspace/alerts/alerts.go
index 695fa6a94..d4a7d02af 100755
--- a/cmd/workspace/alerts/alerts.go
+++ b/cmd/workspace/alerts/alerts.go
@@ -317,7 +317,7 @@ func newUpdate() *cobra.Command {
 	cmd.Annotations = make(map[string]string)
 
 	cmd.Args = func(cmd *cobra.Command, args []string) error {
-		check := cobra.ExactArgs(1)
+		check := root.ExactArgs(1)
 		return check(cmd, args)
 	}
 
diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go
index 691584db7..1ea50e830 100755
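Not part of the patch itself: a minimal sketch of what the validators added in cmd/root/args.go above change in practice. The `greet` command is hypothetical; the import path and helper signatures are exactly the ones introduced in this diff. Unlike the stock cobra validators, the returned error embeds the command's usage string, so argument errors tell the user how to invoke the command.

package main

import (
	"fmt"

	"github.com/databricks/cli/cmd/root"
	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical command; only the Args wiring matters for this sketch.
	cmd := &cobra.Command{
		Use:  "greet NAME",
		Args: root.ExactArgs(1), // previously cobra.ExactArgs(1)
	}

	// Calling the validator with the wrong argument count returns an
	// *InvalidArgsError whose Error() is the message ("accepts 1 arg(s),
	// received 0") followed by a blank line and cmd.UsageString().
	err := cmd.Args(cmd, []string{})
	fmt.Println(err)
}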
--- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -137,7 +137,7 @@ func newDeleteApp() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -195,7 +195,7 @@ func newGetApp() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -255,7 +255,7 @@ func newGetAppDeploymentStatus() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -355,7 +355,7 @@ func newGetEvents() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go index 329ca9c3d..fc25e3cb8 100755 --- a/cmd/workspace/artifact-allowlists/artifact-allowlists.go +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -70,7 +70,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -135,7 +135,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go index 4c6e643de..4e198eb46 100755 --- a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go +++ b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go @@ -63,7 +63,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index b08769420..0d0989b97 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -89,13 +89,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -164,7 +164,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -224,7 +224,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -336,7 +336,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 33facfb95..9466c4b94 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -141,7 +141,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -202,7 +202,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -262,7 +262,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -334,7 +334,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 15a75b1f7..8129db477 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -107,13 +107,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -183,7 +183,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'policy_id' in your JSON input") } @@ -283,13 +283,13 @@ func newEdit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'policy_id', 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -569,7 +569,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index 8d7737552..70afc609b 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -118,13 +118,13 @@ func newChangeOwner() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id', 'owner_username' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -253,13 +253,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'spark_version' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -348,7 +348,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -512,13 +512,13 @@ func newEdit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id', 'spark_version' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -611,7 +611,7 @@ func newEvents() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -919,7 +919,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1066,7 +1066,7 @@ func newPermanentDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'cluster_id' in your JSON input") } @@ -1158,7 +1158,7 @@ func newPin() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1257,7 +1257,7 @@ func newResize() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1367,7 +1367,7 @@ func newRestart() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1605,7 +1605,7 @@ func newStart() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1709,7 +1709,7 @@ func newUnpin() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'cluster_id' in your JSON input") } diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index 87ec52beb..bdb266685 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -331,7 +331,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/csp-enablement/csp-enablement.go b/cmd/workspace/csp-enablement/csp-enablement.go index 5e037f2ab..623a7e541 100755 --- a/cmd/workspace/csp-enablement/csp-enablement.go +++ b/cmd/workspace/csp-enablement/csp-enablement.go @@ -66,7 +66,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/dashboard-widgets/dashboard-widgets.go b/cmd/workspace/dashboard-widgets/dashboard-widgets.go index 90463dd00..02b13739a 100755 --- a/cmd/workspace/dashboard-widgets/dashboard-widgets.go +++ b/cmd/workspace/dashboard-widgets/dashboard-widgets.go @@ -128,7 +128,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -186,7 +186,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 3020cb606..0500ebecf 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -274,7 +274,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/default-namespace/default-namespace.go b/cmd/workspace/default-namespace/default-namespace.go index 38880dd57..89c11d7cd 100755 --- a/cmd/workspace/default-namespace/default-namespace.go +++ b/cmd/workspace/default-namespace/default-namespace.go @@ -78,7 +78,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -133,7 +133,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/esm-enablement/esm-enablement.go b/cmd/workspace/esm-enablement/esm-enablement.go index a3da246fe..be0eed2f8 100755 --- a/cmd/workspace/esm-enablement/esm-enablement.go +++ b/cmd/workspace/esm-enablement/esm-enablement.go @@ -68,7 +68,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 
50337390a..e1e00380b 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -114,13 +114,13 @@ func newCreateExperiment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -193,7 +193,7 @@ func newCreateRun() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -261,13 +261,13 @@ func newDeleteExperiment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -336,13 +336,13 @@ func newDeleteRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -419,13 +419,13 @@ func newDeleteRuns() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id', 'max_timestamp_millis' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -502,13 +502,13 @@ func newDeleteTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'run_id', 'key' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -585,7 +585,7 @@ func newGetByName() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -643,7 +643,7 @@ func newGetExperiment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -706,7 +706,7 @@ func newGetHistory() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -761,7 +761,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -820,7 +820,7 @@ func newGetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -885,7 +885,7 @@ func newGetRun() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -946,7 +946,7 @@ func newListArtifacts() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1000,7 +1000,7 @@ func newListExperiments() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1093,7 +1093,7 @@ func newLogBatch() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1159,7 +1159,7 @@ func newLogInputs() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1233,13 +1233,13 @@ func newLogMetric() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'key', 'value', 'timestamp' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -1320,7 +1320,7 @@ func newLogModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1393,13 +1393,13 @@ func newLogParam() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1476,13 +1476,13 @@ func newRestoreExperiment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1551,13 +1551,13 @@ func newRestoreRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1634,13 +1634,13 @@ func newRestoreRuns() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id', 'min_timestamp_millis' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1717,7 +1717,7 @@ func newSearchExperiments() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1785,7 +1785,7 @@ func newSearchRuns() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1853,13 +1853,13 @@ func newSetExperimentTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'experiment_id', 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -1936,7 +1936,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2011,13 +2011,13 @@ func newSetTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -2091,13 +2091,13 @@ func newUpdateExperiment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2168,7 +2168,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2236,7 +2236,7 @@ func newUpdateRun() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 76e460050..a123507ca 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -95,13 +95,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'url', 'credential_name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -176,7 +176,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -236,7 +236,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -299,7 +299,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -367,7 +367,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 5b1b90241..e4de29b5a 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -304,7 +304,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index ca8a1c274..c335d4caa 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -86,13 +86,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'git_provider' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index 0461b4514..92dcb2592 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -85,13 +85,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'script' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -351,13 +351,13 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only SCRIPT_ID as positional arguments. 
Provide 'name', 'script' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/grants/grants.go b/cmd/workspace/grants/grants.go index 851c3cfbe..876f0343e 100755 --- a/cmd/workspace/grants/grants.go +++ b/cmd/workspace/grants/grants.go @@ -81,7 +81,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -146,7 +146,7 @@ func newGetEffective() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -213,7 +213,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index 2fc632201..14650d984 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -87,7 +87,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -295,7 +295,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 8000365b0..db96f1466 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -112,13 +112,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_pool_name', 'node_type_id' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -191,7 +191,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_pool_id' in your JSON input") } @@ -293,13 +293,13 @@ func newEdit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'instance_pool_id', 'instance_pool_name', 'node_type_id' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/instance-profiles/instance-profiles.go b/cmd/workspace/instance-profiles/instance-profiles.go index 919ec511d..7134c16c6 100755 --- a/cmd/workspace/instance-profiles/instance-profiles.go +++ b/cmd/workspace/instance-profiles/instance-profiles.go @@ -83,13 +83,13 @@ func newAdd() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_profile_arn' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -176,13 +176,13 @@ func newEdit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_profile_arn' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -295,13 +295,13 @@ func newRemove() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_profile_arn' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index 2b6ddfa23..ec5958b5b 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -117,13 +117,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'label', 'list_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -398,13 +398,13 @@ func newReplace() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only IP_ACCESS_LIST_ID as positional arguments. 
Provide 'label', 'list_type', 'enabled' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 8a98e1c85..17bef3aaa 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -106,7 +106,7 @@ func newCancelAllRuns() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -178,7 +178,7 @@ func newCancelRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } @@ -349,7 +349,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input") } @@ -442,7 +442,7 @@ func newDeleteRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } @@ -989,7 +989,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1050,7 +1050,7 @@ func newListRuns() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1127,7 +1127,7 @@ func newRepairRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } @@ -1316,7 +1316,7 @@ func newRunNow() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input") } @@ -1525,7 +1525,7 @@ func newSubmit() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1614,7 +1614,7 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'job_id' in your JSON input") } diff --git a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go index 9559d036d..95a536a05 100755 --- a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go +++ b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go @@ -93,7 +93,7 @@ func newCancelRefresh() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -150,7 +150,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: schedule cmd.Flags().BoolVar(&createReq.SkipBuiltinDashboard, "skip-builtin-dashboard", createReq.SkipBuiltinDashboard, `Whether to skip creating a default dashboard summarizing data quality metrics.`) // TODO: array: slicing_exprs - // TODO: complex arg: snapshot + // TODO: output-only field // TODO: complex arg: time_series cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `Optional argument to specify the warehouse for dashboard creation.`) @@ -179,13 +179,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. Provide 'assets_dir', 'output_schema_name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -267,7 +267,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -336,7 +336,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -404,7 +404,7 @@ func newGetRefresh() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -473,7 +473,7 @@ func newListRefreshes() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -541,7 +541,7 @@ func newRunRefresh() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -596,7 +596,7 @@ func newUpdate() *cobra.Command { // TODO: array: notifications // TODO: complex arg: schedule // TODO: array: slicing_exprs - // TODO: complex arg: snapshot + // TODO: output-only field // TODO: complex arg: time_series cmd.Use = "update FULL_NAME OUTPUT_SCHEMA_NAME" @@ -625,13 +625,13 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. 
Provide 'output_schema_name' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index a81483997..df42e7192 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -70,7 +70,7 @@ func newPublish() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index d6761a821..e11e5a4c5 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -146,7 +146,7 @@ func newClusterStatus() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index 97e77a479..dd40bf92b 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -94,13 +94,13 @@ func newAssign() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only WORKSPACE_ID as positional arguments. Provide 'metastore_id', 'default_catalog_name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -183,13 +183,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -525,7 +525,7 @@ func newUnassign() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index 74e5e66e3..41f06ac4d 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -125,13 +125,13 @@ func newApproveTransitionRequest() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'version', 'stage', 'archive_existing_versions' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -219,13 +219,13 @@ func newCreateComment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'comment' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -306,13 +306,13 @@ func newCreateModel() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -387,13 +387,13 @@ func newCreateModelVersion() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'source' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -477,13 +477,13 @@ func newCreateTransitionRequest() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'version', 'stage' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -622,7 +622,7 @@ func newDeleteComment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -680,7 +680,7 @@ func newDeleteModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -740,7 +740,7 @@ func newDeleteModelTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -800,7 +800,7 @@ func newDeleteModelVersion() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -862,7 +862,7 @@ func newDeleteModelVersionTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -937,7 +937,7 @@ func newDeleteTransitionRequest() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -1002,7 +1002,7 @@ func newDeleteWebhook() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1063,13 +1063,13 @@ func newGetLatestVersions() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1136,7 +1136,7 @@ func newGetModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1195,7 +1195,7 @@ func newGetModelVersion() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1255,7 +1255,7 @@ func newGetModelVersionDownloadUri() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1314,7 +1314,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1373,7 +1373,7 @@ func newGetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1432,7 +1432,7 @@ func newListModels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1486,7 +1486,7 @@ func newListTransitionRequests() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1545,7 +1545,7 @@ func newListWebhooks() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1613,13 +1613,13 @@ func newRejectTransitionRequest() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -1699,13 +1699,13 @@ func newRenameModel() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1773,7 +1773,7 @@ func newSearchModelVersions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1828,7 +1828,7 @@ func newSearchModels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1891,13 +1891,13 @@ func newSetModelTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -1980,13 +1980,13 @@ func newSetModelVersionTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -2066,7 +2066,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2150,13 +2150,13 @@ func newTestRegistryWebhook() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2243,13 +2243,13 @@ func newTransitionStage() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage', 'archive_existing_versions' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -2334,13 +2334,13 @@ func newUpdateComment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'id', 'comment' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -2414,13 +2414,13 @@ func newUpdateModel() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2492,13 +2492,13 @@ func newUpdateModelVersion() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -2572,7 +2572,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2647,13 +2647,13 @@ func newUpdateWebhook() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index b322e8807..a606b01df 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -83,7 +83,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -151,7 +151,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -219,7 +219,7 @@ func newGetByAlias() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -293,7 +293,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -360,7 +360,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/online-tables/online-tables.go b/cmd/workspace/online-tables/online-tables.go index 2a5574da9..a1e21e0f1 100755 --- a/cmd/workspace/online-tables/online-tables.go +++ b/cmd/workspace/online-tables/online-tables.go @@ -69,7 +69,7 @@ func newCreate() 
*cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -134,7 +134,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -192,7 +192,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/permission-migration/permission-migration.go b/cmd/workspace/permission-migration/permission-migration.go index a957d5ca3..40d3f9a3b 100755 --- a/cmd/workspace/permission-migration/permission-migration.go +++ b/cmd/workspace/permission-migration/permission-migration.go @@ -80,13 +80,13 @@ func newMigratePermissions() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'workspace_id', 'from_workspace_group_name', 'to_account_group_name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index 5bf837e35..38a3bf9c0 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -118,7 +118,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -178,7 +178,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -246,7 +246,7 @@ func newSet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -320,7 +320,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 78f42d6cd..b7c3235f8 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -432,7 +432,7 @@ func newGetUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -562,7 +562,7 @@ func newListPipelines() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/policy-families/policy-families.go b/cmd/workspace/policy-families/policy-families.go index f6c07bf70..beee6e963 100755 
--- a/cmd/workspace/policy-families/policy-families.go +++ b/cmd/workspace/policy-families/policy-families.go @@ -69,7 +69,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -127,7 +127,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 93f89c981..7305191c8 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -81,13 +81,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'authentication_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -304,7 +304,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index f2ab6f59c..0126097fc 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -284,7 +284,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/query-history/query-history.go b/cmd/workspace/query-history/query-history.go index a0402e6d0..60d6004d9 100755 --- a/cmd/workspace/query-history/query-history.go +++ b/cmd/workspace/query-history/query-history.go @@ -67,7 +67,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/query-visualizations/query-visualizations.go b/cmd/workspace/query-visualizations/query-visualizations.go index 4161ac7d5..c94d83a82 100755 --- a/cmd/workspace/query-visualizations/query-visualizations.go +++ b/cmd/workspace/query-visualizations/query-visualizations.go @@ -128,7 +128,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -186,7 +186,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/recipient-activation/recipient-activation.go b/cmd/workspace/recipient-activation/recipient-activation.go index 068e6bb10..457fa9042 100755 --- a/cmd/workspace/recipient-activation/recipient-activation.go +++ b/cmd/workspace/recipient-activation/recipient-activation.go @@ -73,7 +73,7 @@ func newGetActivationUrlInfo() 
*cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -132,7 +132,7 @@ func newRetrieveToken() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 797863137..c21d8a8c0 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -101,13 +101,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'authentication_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -324,7 +324,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -385,13 +385,13 @@ func newRotateToken() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only NAME as positional arguments. Provide 'existing_token_expire_in_seconds' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index 6cd01c137..5d0d26736 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -119,13 +119,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'catalog_name', 'schema_name', 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -279,7 +279,7 @@ func newDeleteAlias() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -426,7 +426,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -489,13 +489,13 @@ func newSetAlias() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(2)(cmd, args) + err := root.ExactArgs(2)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only FULL_NAME, ALIAS as positional arguments. 
Provide 'version_num' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/repos/overrides.go b/cmd/workspace/repos/overrides.go index f6f26f81d..96d645efb 100644 --- a/cmd/workspace/repos/overrides.go +++ b/cmd/workspace/repos/overrides.go @@ -25,7 +25,7 @@ func createOverride(createCmd *cobra.Command, createReq *workspace.CreateRepo) { // If the provider argument is not specified, we try to detect it from the URL. check := cobra.RangeArgs(1, 2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + check = root.ExactArgs(0) } return check(cmd, args) } diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 6a989437a..fb3d51b06 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -94,13 +94,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'url', 'provider' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -457,7 +457,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go index d6b8a8424..e0ca8030f 100755 --- a/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go +++ b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go @@ -78,7 +78,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -133,7 +133,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index a5efeed37..6d9d26f5a 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -84,13 +84,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'catalog_name' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -311,7 +311,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/secrets/put_secret.go b/cmd/workspace/secrets/put_secret.go index 2fbf49c5c..e323c7a10 100644 --- a/cmd/workspace/secrets/put_secret.go +++ b/cmd/workspace/secrets/put_secret.go @@ -50,9 +50,9 @@ func newPutSecret() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + check = root.ExactArgs(0) } return check(cmd, args) } diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 35b84907e..981062dfb 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -95,13 +95,13 @@ func newCreateScope() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -176,13 +176,13 @@ func newDeleteAcl() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope', 'principal' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -258,13 +258,13 @@ func newDeleteScope() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -339,13 +339,13 @@ func newDeleteSecret() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'scope', 'key' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -420,7 +420,7 @@ func newGetAcl() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -491,7 +491,7 @@ func newGetSecret() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -555,7 +555,7 @@ func newListAcls() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -659,7 +659,7 @@ func newListSecrets() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -744,13 +744,13 @@ func newPutAcl() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope', 'principal', 'permission' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index d363a1ba1..957cb1265 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -85,7 +85,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -294,7 +294,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index c4ca7d62d..45dff030a 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -97,7 +97,7 @@ func newBuildLogs() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -232,7 +232,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -292,7 +292,7 @@ func newExportMetrics() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -350,7 +350,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check 
:= root.ExactArgs(1) return check(cmd, args) } @@ -408,7 +408,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -467,7 +467,7 @@ func newGetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -566,7 +566,7 @@ func newLogs() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -632,7 +632,7 @@ func newPatch() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -702,7 +702,7 @@ func newPut() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -780,7 +780,7 @@ func newQuery() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -849,7 +849,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -928,7 +928,7 @@ func newUpdateConfig() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1010,7 +1010,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 38e19e839..8ba0335fb 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -25,6 +25,9 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "settings", }, + + // This service is being previewed; hide from help output. + Hidden: true, } // Add subservices diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index b849f84f7..0e3523cec 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -83,13 +83,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -156,7 +156,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -217,7 +217,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -317,7 +317,7 @@ func newSharePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -395,7 +395,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -467,7 +467,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index b763d1934..3164baa2b 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -78,7 +78,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Comment associated with the credential.`) - // TODO: complex arg: databricks_gcp_service_account + // TODO: output-only field cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Supplying true to this argument skips validation of the created credential.`) @@ -95,13 +95,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -317,7 +317,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -365,7 +365,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) - // TODO: complex arg: databricks_gcp_service_account + // TODO: output-only field cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) @@ -454,7 +454,7 @@ func newValidate() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token - // TODO: complex arg: databricks_gcp_service_account + // TODO: output-only field cmd.Flags().StringVar(&validateReq.ExternalLocationName, "external-location-name", validateReq.ExternalLocationName, `The name of an existing external location to validate.`) cmd.Flags().BoolVar(&validateReq.ReadOnly, "read-only", validateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) cmd.Flags().StringVar(&validateReq.StorageCredentialName, "storage-credential-name", validateReq.StorageCredentialName, `The name of the storage credential to validate.`) @@ -480,7 +480,7 @@ func newValidate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go index d8135ac2a..070701d2f 100755 --- a/cmd/workspace/system-schemas/system-schemas.go +++ b/cmd/workspace/system-schemas/system-schemas.go @@ -71,7 +71,7 @@ func newDisable() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -135,7 +135,7 @@ func newEnable() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -198,7 +198,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/table-constraints/table-constraints.go b/cmd/workspace/table-constraints/table-constraints.go index d5597ab33..166da146c 100755 --- a/cmd/workspace/table-constraints/table-constraints.go +++ b/cmd/workspace/table-constraints/table-constraints.go @@ -159,7 +159,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) 
return check(cmd, args) } diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index 1ee6b0d52..793fb7a2b 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -320,7 +320,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index 5209ff16d..dea94edb0 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -81,7 +81,7 @@ func newCreateOboToken() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'application_id' in your JSON input") } @@ -393,7 +393,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -448,7 +448,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -513,7 +513,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/tokens/tokens.go b/cmd/workspace/tokens/tokens.go index bdb99d601..afe4b9a03 100755 --- a/cmd/workspace/tokens/tokens.go +++ b/cmd/workspace/tokens/tokens.go @@ -74,7 +74,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -143,7 +143,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'token_id' in your JSON input") } diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 676b10a08..53ba2e85c 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -97,7 +97,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -399,7 +399,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -536,7 +536,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -692,7 +692,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index a8d3d3ee8..dd9d57835 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -79,13 +79,13 @@ func newCreateEndpoint() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'endpoint_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -174,7 +174,7 @@ func newDeleteEndpoint() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -230,7 +230,7 @@ func newGetEndpoint() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -285,7 +285,7 @@ func newListEndpoints() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index a9b9f51df..4e117e5bd 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -96,13 +96,13 @@ func newCreateIndex() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'endpoint_name', 'primary_key', 'index_type' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -183,7 +183,7 @@ func newDeleteDataVectorIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -249,7 +249,7 @@ func newDeleteIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -307,7 +307,7 @@ func newGetIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -367,7 +367,7 @@ func newListIndexes() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -429,7 +429,7 @@ func newQueryIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -495,7 +495,7 @@ func newSyncIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -558,13 +558,13 @@ func newUpsertDataVectorIndex() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only INDEX_NAME as positional arguments. Provide 'inputs_json' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 5a2991b90..335b7d011 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -103,13 +103,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'catalog_name', 'schema_name', 'name', 'volume_type' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -275,7 +275,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index 3d1f05439..cdf106365 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -99,7 +99,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -625,7 +625,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -769,7 +769,7 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } diff --git a/cmd/workspace/workspace-bindings/workspace-bindings.go b/cmd/workspace/workspace-bindings/workspace-bindings.go index 3543f1e9d..b7e0614ea 100755 --- a/cmd/workspace/workspace-bindings/workspace-bindings.go +++ b/cmd/workspace/workspace-bindings/workspace-bindings.go @@ -85,7 +85,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -145,7 +145,7 @@ func newGetBindings() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -210,7 +210,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -281,7 +281,7 @@ func newUpdateBindings() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/cmd/workspace/workspace-conf/workspace-conf.go b/cmd/workspace/workspace-conf/workspace-conf.go index 87ea86c8e..92b2f0f3a 100755 --- a/cmd/workspace/workspace-conf/workspace-conf.go +++ b/cmd/workspace/workspace-conf/workspace-conf.go @@ -64,7 +64,7 @@ func newGetStatus() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } diff --git a/cmd/workspace/workspace/export_dir.go b/cmd/workspace/workspace/export_dir.go index 79e64e8ad..0b53666f9 100644 --- a/cmd/workspace/workspace/export_dir.go +++ b/cmd/workspace/workspace/export_dir.go @@ -94,7 +94,7 @@ func newExportDir() *cobra.Command { ` cmd.Annotations = make(map[string]string) - cmd.Args = cobra.ExactArgs(2) + cmd.Args = root.ExactArgs(2) cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd 
*cobra.Command, args []string) (err error) { diff --git a/cmd/workspace/workspace/import_dir.go b/cmd/workspace/workspace/import_dir.go index 6ce5f3c2b..19d9a0a17 100644 --- a/cmd/workspace/workspace/import_dir.go +++ b/cmd/workspace/workspace/import_dir.go @@ -119,7 +119,7 @@ Notebooks will have their extensions (one of .scala, .py, .sql, .ipynb, .r) stri ` cmd.Annotations = make(map[string]string) - cmd.Args = cobra.ExactArgs(2) + cmd.Args = root.ExactArgs(2) cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 42517c432..183cac898 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -91,7 +91,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'path' in your JSON input") } @@ -266,7 +266,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -327,7 +327,7 @@ func newGetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -387,7 +387,7 @@ func newGetStatus() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -466,13 +466,13 @@ func newImport() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'path' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -542,7 +542,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -607,7 +607,7 @@ func newMkdirs() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'path' in your JSON input") } @@ -700,7 +700,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -771,7 +771,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } diff --git a/internal/clusters_test.go b/internal/clusters_test.go index e8208d047..6daddcce3 100644 --- a/internal/clusters_test.go +++ b/internal/clusters_test.go @@ -36,5 +36,5 @@ func TestAccClustersGet(t *testing.T) { func TestClusterCreateErrorWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "clusters", "create") - assert.Equal(t, "accepts 1 arg(s), received 0", err.Error()) + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } diff --git a/internal/secrets_test.go b/internal/secrets_test.go index b030071bb..d9c03f096 100644 --- a/internal/secrets_test.go +++ b/internal/secrets_test.go @@ -14,7 +14,7 @@ import ( func TestSecretsCreateScopeErrWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "secrets", "create-scope") - assert.Equal(t, "accepts 1 arg(s), received 0", err.Error()) + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func temporarySecretScope(ctx context.Context, t *acc.WorkspaceT) string { diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 164677390..6ca8cd4fb 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -34,12 +34,12 @@ func TestAccWorkspaceList(t *testing.T) { func TestWorkpaceListErrorWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "workspace", "list") - assert.Equal(t, "accepts 1 arg(s), received 0", err.Error()) + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "workspace", "get-status") - assert.Equal(t, "accepts 1 arg(s), received 0", err.Error()) + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func TestWorkpaceExportPrintsContents(t *testing.T) {
From d4329f470fe5666b4b811f93949c798710a4edcf Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Tue, 12 Mar 2024 19:45:54 +0530
Subject: [PATCH 079/286] Add integration test for mlops-stacks initialization (#1155)

## Changes

This PR:
1. Adds an integration test for mlops-stacks that checks that initialization and deployment of the project succeed.
2. Fixes a bug in the initialization of templates from non-tty input. We need to process the input parameters in order, since their descriptions can refer to input parameters that came before them in the interactive UX. (A short sketch of this ordering is included below.)

## Tests

The integration test passes in CI.
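To make the ordering requirement in (2) concrete, here is a minimal, self-contained sketch. It is illustrative only: the `param` struct, the `project_name`/`experiment_name` names, and the resolution loop are hypothetical stand-ins, not the actual types or code in `libs/template/config.go`. The point is simply that a default or description which references an earlier parameter only resolves correctly when parameters are processed in declaration order rather than in randomized map order.

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// param is a hypothetical stand-in for a template input parameter whose
// default value may reference parameters declared before it.
type param struct {
	Name    string
	Default string // may contain template expressions such as {{.project_name}}
}

func main() {
	// Declaration order matters: "experiment_name" refers to "project_name".
	params := []param{
		{Name: "project_name", Default: "my_mlops_project"},
		{Name: "experiment_name", Default: "{{.project_name}}_experiment"},
	}

	// Resolve defaults in declaration order so each parameter can see the
	// values resolved before it. Iterating over a map here would randomize
	// the order and could try to resolve "experiment_name" first.
	values := map[string]string{}
	for _, p := range params {
		tmpl, err := template.New(p.Name).Parse(p.Default)
		if err != nil {
			panic(err)
		}
		var buf bytes.Buffer
		if err := tmpl.Execute(&buf, values); err != nil {
			panic(err)
		}
		values[p.Name] = buf.String()
	}

	fmt.Println(values["experiment_name"]) // prints: my_mlops_project_experiment
}
```

The same ordering constraint is what the non-interactive (non-tty) code path needs to respect, per the description above; the interactive prompt already walks parameters one by one in order.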
--- bundle/bundle_test.go | 5 +-- bundle/root_test.go | 35 +++++------------- internal/init_test.go | 77 ++++++++++++++++++++++++++++++++++++++++ internal/testutil/env.go | 23 ++++++++++++ libs/template/config.go | 5 ++- 5 files changed, 115 insertions(+), 30 deletions(-) diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index 43477efd1..887a4ee83 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -86,7 +87,7 @@ func TestBundleMustLoadFailureWithEnv(t *testing.T) { } func TestBundleMustLoadFailureIfNotFound(t *testing.T) { - chdir(t, t.TempDir()) + testutil.Chdir(t, t.TempDir()) _, err := MustLoad(context.Background()) require.Error(t, err, "unable to find bundle root") } @@ -105,7 +106,7 @@ func TestBundleTryLoadFailureWithEnv(t *testing.T) { } func TestBundleTryLoadOkIfNotFound(t *testing.T) { - chdir(t, t.TempDir()) + testutil.Chdir(t, t.TempDir()) b, err := TryLoad(context.Background()) assert.NoError(t, err) assert.Nil(t, b) diff --git a/bundle/root_test.go b/bundle/root_test.go index 88113546c..e6c53e824 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -8,30 +8,11 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// Changes into specified directory for the duration of the test. -// Returns the current working directory. -func chdir(t *testing.T, dir string) string { - wd, err := os.Getwd() - require.NoError(t, err) - - abs, err := filepath.Abs(dir) - require.NoError(t, err) - - err = os.Chdir(abs) - require.NoError(t, err) - - t.Cleanup(func() { - err := os.Chdir(wd) - require.NoError(t, err) - }) - - return wd -} - func TestRootFromEnv(t *testing.T) { ctx := context.Background() dir := t.TempDir() @@ -83,7 +64,7 @@ func TestRootLookup(t *testing.T) { t.Setenv(env.RootVariable, "") os.Unsetenv(env.RootVariable) - chdir(t, t.TempDir()) + testutil.Chdir(t, t.TempDir()) // Create databricks.yml file. f, err := os.Create(config.FileNames[0]) @@ -95,7 +76,7 @@ func TestRootLookup(t *testing.T) { require.NoError(t, err) // It should find the project root from $PWD. - wd := chdir(t, "./a/b/c") + wd := testutil.Chdir(t, "./a/b/c") root, err := mustGetRoot(ctx) require.NoError(t, err) require.Equal(t, wd, root) @@ -109,14 +90,14 @@ func TestRootLookupError(t *testing.T) { os.Unsetenv(env.RootVariable) // It can't find a project root from a temporary directory. 
- _ = chdir(t, t.TempDir()) + _ = testutil.Chdir(t, t.TempDir()) _, err := mustGetRoot(ctx) require.ErrorContains(t, err, "unable to locate bundle root") } func TestLoadYamlWhenIncludesEnvPresent(t *testing.T) { ctx := context.Background() - chdir(t, filepath.Join(".", "tests", "basic")) + testutil.Chdir(t, filepath.Join(".", "tests", "basic")) t.Setenv(env.IncludesVariable, "test") bundle, err := MustLoad(ctx) @@ -131,7 +112,7 @@ func TestLoadYamlWhenIncludesEnvPresent(t *testing.T) { func TestLoadDefautlBundleWhenNoYamlAndRootAndIncludesEnvPresent(t *testing.T) { ctx := context.Background() dir := t.TempDir() - chdir(t, dir) + testutil.Chdir(t, dir) t.Setenv(env.RootVariable, dir) t.Setenv(env.IncludesVariable, "test") @@ -143,7 +124,7 @@ func TestLoadDefautlBundleWhenNoYamlAndRootAndIncludesEnvPresent(t *testing.T) { func TestErrorIfNoYamlNoRootEnvAndIncludesEnvPresent(t *testing.T) { ctx := context.Background() dir := t.TempDir() - chdir(t, dir) + testutil.Chdir(t, dir) t.Setenv(env.IncludesVariable, "test") _, err := MustLoad(ctx) @@ -153,7 +134,7 @@ func TestErrorIfNoYamlNoRootEnvAndIncludesEnvPresent(t *testing.T) { func TestErrorIfNoYamlNoIncludesEnvAndRootEnvPresent(t *testing.T) { ctx := context.Background() dir := t.TempDir() - chdir(t, dir) + testutil.Chdir(t, dir) t.Setenv(env.RootVariable, dir) _, err := MustLoad(ctx) diff --git a/internal/init_test.go b/internal/init_test.go index c4c3d6d84..45e6d031a 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -2,11 +2,15 @@ package internal import ( "context" + "encoding/json" + "fmt" "os" "path/filepath" "strconv" "testing" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/auth" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" @@ -21,6 +25,79 @@ func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { assert.EqualError(t, err, "failed to compute file content for bar.tmpl. variable \"does_not_exist\" not defined") } +// This test tests the MLOps Stacks DAB e2e and thus there's a couple of special +// considerations to take note of: +// +// 1. Upstream changes to the MLOps Stacks DAB can cause this test to fail. +// In which case we should do one of: +// (a) Update this test to reflect the changes +// (b) Update the MLOps Stacks DAB to not break this test. Skip this test +// temporarily until the MLOps Stacks DAB is updated +// +// 2. While rare and to be avoided if possible, the CLI reserves the right to +// make changes that can break the MLOps Stacks DAB. In which case we should +// skip this test until the MLOps Stacks DAB is updated to work again. 
+func TestAccBundleInitOnMlopsStacks(t *testing.T) { + t.Parallel() + env := GetEnvOrSkipTest(t, "CLOUD_ENV") + tmpDir1 := t.TempDir() + tmpDir2 := t.TempDir() + + w, err := databricks.NewWorkspaceClient(&databricks.Config{}) + require.NoError(t, err) + + projectName := RandomName("project_name_") + + // Create a config file with the project name and root dir + initConfig := map[string]string{ + "input_project_name": projectName, + "input_root_dir": "repo_name", + "input_include_models_in_unity_catalog": "no", + "input_cloud": env, + } + b, err := json.Marshal(initConfig) + require.NoError(t, err) + os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0644) + + // Run bundle init + assert.NoFileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) + RequireSuccessfulRun(t, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) + + // Assert that the README.md file was created + assert.FileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) + assertLocalFileContents(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md"), fmt.Sprintf("# %s", projectName)) + + // Validate the stack + testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName)) + RequireSuccessfulRun(t, "bundle", "validate") + + // Deploy the stack + RequireSuccessfulRun(t, "bundle", "deploy") + t.Cleanup(func() { + // Delete the stack + RequireSuccessfulRun(t, "bundle", "destroy", "--auto-approve") + }) + + // Get summary of the bundle deployment + stdout, _ := RequireSuccessfulRun(t, "bundle", "summary", "--output", "json") + summary := &config.Root{} + err = json.Unmarshal(stdout.Bytes(), summary) + require.NoError(t, err) + + // Assert resource Ids are not empty + assert.NotEmpty(t, summary.Resources.Experiments["experiment"].ID) + assert.NotEmpty(t, summary.Resources.Models["model"].ID) + assert.NotEmpty(t, summary.Resources.Jobs["batch_inference_job"].ID) + assert.NotEmpty(t, summary.Resources.Jobs["model_training_job"].ID) + + // Assert the batch inference job actually exists + batchJobId, err := strconv.ParseInt(summary.Resources.Jobs["batch_inference_job"].ID, 10, 64) + require.NoError(t, err) + job, err := w.Jobs.GetByJobId(context.Background(), batchJobId) + assert.NoError(t, err) + assert.Equal(t, fmt.Sprintf("dev-%s-batch-inference-job", projectName), job.Settings.Name) +} + func TestAccBundleInitHelpers(t *testing.T) { env := GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) diff --git a/internal/testutil/env.go b/internal/testutil/env.go index 39201c5b4..e1973ba82 100644 --- a/internal/testutil/env.go +++ b/internal/testutil/env.go @@ -2,9 +2,12 @@ package testutil import ( "os" + "path/filepath" "runtime" "strings" "testing" + + "github.com/stretchr/testify/require" ) // CleanupEnvironment sets up a pristine environment containing only $PATH and $HOME. @@ -44,3 +47,23 @@ func GetEnvOrSkipTest(t *testing.T, name string) string { } return value } + +// Changes into specified directory for the duration of the test. +// Returns the current working directory. 
+func Chdir(t *testing.T, dir string) string { + wd, err := os.Getwd() + require.NoError(t, err) + + abs, err := filepath.Abs(dir) + require.NoError(t, err) + + err = os.Chdir(abs) + require.NoError(t, err) + + t.Cleanup(func() { + err := os.Chdir(wd) + require.NoError(t, err) + }) + + return wd +} diff --git a/libs/template/config.go b/libs/template/config.go index 5dd038e01..970e74ca9 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -89,7 +89,10 @@ func (c *config) assignValuesFromFile(path string) error { // Assigns default values from schema to input config map func (c *config) assignDefaultValues(r *renderer) error { - for name, property := range c.schema.Properties { + for _, p := range c.schema.OrderedProperties() { + name := p.Name + property := p.Schema + // Config already has a value assigned if _, ok := c.values[name]; ok { continue From 5f29b5ecd94ead7c5feecf2066852635141e2559 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 13 Mar 2024 18:29:49 +0530 Subject: [PATCH 080/286] Fix TestAccBundleInitOnMlopsStacks to work on aws-prod-ucws (#1283) ## Changes aws-prod-ucws has CLOUD_ENV set to "ucws" which was failing the validation checks in the template itself. This PR fixes the test. ## Tests The tests pass now --- internal/init_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/internal/init_test.go b/internal/init_test.go index 45e6d031a..bed1119f8 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "strconv" + "strings" "testing" "github.com/databricks/cli/bundle/config" @@ -39,7 +40,8 @@ func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { // skip this test until the MLOps Stacks DAB is updated to work again. func TestAccBundleInitOnMlopsStacks(t *testing.T) { t.Parallel() - env := GetEnvOrSkipTest(t, "CLOUD_ENV") + env := testutil.GetCloud(t).String() + tmpDir1 := t.TempDir() tmpDir2 := t.TempDir() @@ -53,7 +55,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { "input_project_name": projectName, "input_root_dir": "repo_name", "input_include_models_in_unity_catalog": "no", - "input_cloud": env, + "input_cloud": strings.ToLower(env), } b, err := json.Marshal(initConfig) require.NoError(t, err) From e22dd8af7dfacdbca6e03012647975fe744d5d52 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 14 Mar 2024 13:56:21 +0100 Subject: [PATCH 081/286] Enabled incorrectly skipped tests (#1280) ## Changes Some integration tests were missing `TestAcc` prefix therefore were skipped. 
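The prefix is load-bearing because integration tests are selected by name; a hedged sketch of the convention (the exact CI invocation is an assumption and is not part of this patch):

```go
package internal

import "testing"

// Assumed selection mechanism: the cloud integration job filters tests by name, along the
// lines of `go test -run '^TestAcc' ./internal/...`. A test named TestReposGet is therefore
// silently excluded from that job, while TestAccReposGet is picked up.
func TestAccNamingConventionExample(t *testing.T) {
	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) // existing helper: skips unless CLOUD_ENV is set
}
```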
## Tests Running integration tests --- internal/repos_test.go | 12 ++++++------ internal/secrets_test.go | 4 ++-- internal/storage_credentials_test.go | 6 +++++- internal/workspace_test.go | 2 +- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/internal/repos_test.go b/internal/repos_test.go index 340de3347..de0d926ad 100644 --- a/internal/repos_test.go +++ b/internal/repos_test.go @@ -43,7 +43,7 @@ func createTemporaryRepo(t *testing.T, w *databricks.WorkspaceClient, ctx contex return repoInfo.Id, repoPath } -func TestReposCreateWithProvider(t *testing.T) { +func TestAccReposCreateWithProvider(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -60,7 +60,7 @@ func TestReposCreateWithProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestReposCreateWithoutProvider(t *testing.T) { +func TestAccReposCreateWithoutProvider(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -77,7 +77,7 @@ func TestReposCreateWithoutProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestReposGet(t *testing.T) { +func TestAccReposGet(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -106,7 +106,7 @@ func TestReposGet(t *testing.T) { assert.ErrorContains(t, err, "is not a repo") } -func TestReposUpdate(t *testing.T) { +func TestAccReposUpdate(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -127,7 +127,7 @@ func TestReposUpdate(t *testing.T) { assert.Equal(t, byIdOutput.String(), byPathOutput.String()) } -func TestReposDeleteByID(t *testing.T) { +func TestAccReposDeleteByID(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -146,7 +146,7 @@ func TestReposDeleteByID(t *testing.T) { assert.True(t, apierr.IsMissing(err), err) } -func TestReposDeleteByPath(t *testing.T) { +func TestAccReposDeleteByPath(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() diff --git a/internal/secrets_test.go b/internal/secrets_test.go index d9c03f096..59e5d6150 100644 --- a/internal/secrets_test.go +++ b/internal/secrets_test.go @@ -61,7 +61,7 @@ func assertSecretBytesValue(t *acc.WorkspaceT, scope, key string, expected []byt assert.Equal(t, expected, decoded) } -func TestSecretsPutSecretStringValue(tt *testing.T) { +func TestAccSecretsPutSecretStringValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" @@ -75,7 +75,7 @@ func TestSecretsPutSecretStringValue(tt *testing.T) { assertSecretBytesValue(t, scope, key, []byte(value)) } -func TestSecretsPutSecretBytesValue(tt *testing.T) { +func TestAccSecretsPutSecretBytesValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" diff --git a/internal/storage_credentials_test.go b/internal/storage_credentials_test.go index 250ad3399..07c21861f 100644 --- a/internal/storage_credentials_test.go +++ b/internal/storage_credentials_test.go @@ -7,8 +7,12 @@ import ( "github.com/stretchr/testify/assert" ) -func TestStorageCredentialsListRendersResponse(t *testing.T) { +func TestAccStorageCredentialsListRendersResponse(t *testing.T) { _, _ = acc.WorkspaceTest(t) + + // Check if metastore is assigned for the workspace, otherwise test will fail + t.Log(GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) + stdout, stderr := RequireSuccessfulRun(t, "storage-credentials", "list") assert.NotEmpty(t, stdout) 
assert.Empty(t, stderr) diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 6ca8cd4fb..bc354914f 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -42,7 +42,7 @@ func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } -func TestWorkpaceExportPrintsContents(t *testing.T) { +func TestAccWorkpaceExportPrintsContents(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() From c49c8cc04cb23fb0dfeb886af23f6e5c18a9ab8c Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 18 Mar 2024 13:48:50 +0100 Subject: [PATCH 082/286] Update actions/setup-python to v5 (#1290) ## Changes This addresses a Node 16 deprecation warning in our GHA output. --- .github/workflows/push.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 26f85982f..428ecd595 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -36,7 +36,7 @@ jobs: go-version: 1.21.x - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.9' From 4f2c6150e7209229bc8cc8ea9b51afd64dbba280 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 18 Mar 2024 13:48:59 +0100 Subject: [PATCH 083/286] Update codecov/codecov-action to v4 (#1291) ## Changes This addresses a Node 16 deprecation warning in our GHA output. Full release notes of v4 at https://github.com/codecov/codecov-action/releases/tag/v4.0.0 --- .github/workflows/push.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 428ecd595..18ba54a37 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -56,7 +56,7 @@ jobs: run: make test - name: Publish test coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 fmt: runs-on: ubuntu-latest From 1b0ac6109341eeecd23bfa6e51bd9acad0007aa4 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 18 Mar 2024 15:41:58 +0100 Subject: [PATCH 084/286] Added deployment state for bundles (#1267) ## Changes This PR introduces new structure (and a file) being used locally and synced remotely to Databricks workspace to track bundle deployment related metadata. The state is pulled from remote, updated and pushed back remotely as part of `bundle deploy` command. This state can be used for deployment sequencing as it's `Version` field is monotonically increasing on each deployment. Currently, it only tracks files being synced as part of the deployment. This helps fix the issue with files not being removed during deployments on CI/CD as sync snapshot was never present there. 
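To make the new state concrete, a sketch of the value that gets serialized to `deployment.json` under the bundle's workspace state path (field names are taken from the `DeploymentState` type added below; the values themselves are made up):

```go
package example

import "github.com/databricks/cli/bundle/deploy"

// Illustrative values only. Seq is incremented on every deployment and is what the
// pull/push mutators compare to decide whether the locally cached state is stale.
var sample = deploy.DeploymentState{
	Version:    deploy.DeploymentStateVersion, // schema version of the state file itself
	Seq:        7,
	CliVersion: "0.215.0", // made-up CLI version
	Files: deploy.Filelist{
		{LocalPath: "src/job.py"},
		{LocalPath: "src/notebook.py", IsNotebook: true},
	},
}
```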
Fixes #943 ## Tests Added E2E (regression) test for files removal on CI/CD --------- Co-authored-by: Pieter Noordhuis --- .../mutator/process_root_includes_test.go | 24 +- bundle/deploy/filer.go | 14 + bundle/deploy/files/delete.go | 2 +- bundle/deploy/files/sync.go | 23 +- bundle/deploy/files/upload.go | 2 +- bundle/deploy/state.go | 174 +++++++ bundle/deploy/state_pull.go | 126 +++++ bundle/deploy/state_pull_test.go | 457 ++++++++++++++++++ bundle/deploy/state_push.go | 49 ++ bundle/deploy/state_push_test.go | 82 ++++ bundle/deploy/state_test.go | 79 +++ bundle/deploy/state_update.go | 108 +++++ bundle/deploy/state_update_test.go | 149 ++++++ bundle/deploy/terraform/filer.go | 14 - bundle/deploy/terraform/state_pull.go | 7 +- bundle/deploy/terraform/state_push.go | 7 +- bundle/deploy/terraform/state_test.go | 5 +- bundle/phases/deploy.go | 3 + cmd/bundle/sync.go | 25 +- cmd/sync/sync.go | 25 +- internal/bundle/deployment_state_test.go | 102 ++++ internal/bundle/helpers.go | 11 + internal/bundle/job_metadata_test.go | 5 +- internal/testutil/helpers.go | 26 + libs/fileset/file.go | 56 +++ libs/fileset/file_test.go | 39 ++ libs/fileset/fileset.go | 2 +- libs/sync/snapshot.go | 24 + libs/sync/snapshot_state.go | 13 +- libs/sync/sync.go | 4 +- libs/sync/sync_test.go | 8 +- 31 files changed, 1569 insertions(+), 96 deletions(-) create mode 100644 bundle/deploy/filer.go create mode 100644 bundle/deploy/state.go create mode 100644 bundle/deploy/state_pull.go create mode 100644 bundle/deploy/state_pull_test.go create mode 100644 bundle/deploy/state_push.go create mode 100644 bundle/deploy/state_push_test.go create mode 100644 bundle/deploy/state_test.go create mode 100644 bundle/deploy/state_update.go create mode 100644 bundle/deploy/state_update_test.go delete mode 100644 bundle/deploy/terraform/filer.go create mode 100644 internal/bundle/deployment_state_test.go create mode 100644 internal/testutil/helpers.go create mode 100644 libs/fileset/file_test.go diff --git a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/mutator/process_root_includes_test.go index 88a6c7433..645eb89a9 100644 --- a/bundle/config/mutator/process_root_includes_test.go +++ b/bundle/config/mutator/process_root_includes_test.go @@ -4,7 +4,6 @@ import ( "context" "os" "path" - "path/filepath" "runtime" "strings" "testing" @@ -13,16 +12,11 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func touch(t *testing.T, path, file string) { - f, err := os.Create(filepath.Join(path, file)) - require.NoError(t, err) - f.Close() -} - func TestProcessRootIncludesEmpty(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ @@ -64,9 +58,9 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) { }, } - touch(t, b.Config.Path, "databricks.yml") - touch(t, b.Config.Path, "a.yml") - touch(t, b.Config.Path, "b.yml") + testutil.Touch(t, b.Config.Path, "databricks.yml") + testutil.Touch(t, b.Config.Path, "a.yml") + testutil.Touch(t, b.Config.Path, "b.yml") err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) @@ -85,8 +79,8 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) { }, } - touch(t, b.Config.Path, "a1.yml") - touch(t, b.Config.Path, "b1.yml") + testutil.Touch(t, b.Config.Path, "a1.yml") + testutil.Touch(t, b.Config.Path, "b1.yml") err := 
bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) @@ -105,7 +99,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) { }, } - touch(t, b.Config.Path, "a.yml") + testutil.Touch(t, b.Config.Path, "a.yml") err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, err) @@ -129,7 +123,7 @@ func TestProcessRootIncludesNotExists(t *testing.T) { func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) { rootPath := t.TempDir() testYamlName := "extra_include_path.yml" - touch(t, rootPath, testYamlName) + testutil.Touch(t, rootPath, testYamlName) t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName)) b := &bundle.Bundle{ @@ -146,7 +140,7 @@ func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) { func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) { rootPath := t.TempDir() testYamlName := "extra_include_path.yml" - touch(t, rootPath, testYamlName) + testutil.Touch(t, rootPath, testYamlName) t.Setenv(env.IncludesVariable, strings.Join( []string{ path.Join(rootPath, testYamlName), diff --git a/bundle/deploy/filer.go b/bundle/deploy/filer.go new file mode 100644 index 000000000..c0fd839ef --- /dev/null +++ b/bundle/deploy/filer.go @@ -0,0 +1,14 @@ +package deploy + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/filer" +) + +// FilerFactory is a function that returns a filer.Filer. +type FilerFactory func(b *bundle.Bundle) (filer.Filer, error) + +// StateFiler returns a filer.Filer that can be used to read/write state files. +func StateFiler(b *bundle.Bundle) (filer.Filer, error) { + return filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) +} diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go index 9f7ad4d41..8585ec3c8 100644 --- a/bundle/deploy/files/delete.go +++ b/bundle/deploy/files/delete.go @@ -45,7 +45,7 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { } // Clean up sync snapshot file - sync, err := getSync(ctx, b) + sync, err := GetSync(ctx, b) if err != nil { return err } diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index 148a63ff6..8de80c22f 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -8,7 +8,15 @@ import ( "github.com/databricks/cli/libs/sync" ) -func getSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) { +func GetSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) { + opts, err := GetSyncOptions(ctx, b) + if err != nil { + return nil, fmt.Errorf("cannot get sync options: %w", err) + } + return sync.New(ctx, *opts) +} + +func GetSyncOptions(ctx context.Context, b *bundle.Bundle) (*sync.SyncOptions, error) { cacheDir, err := b.CacheDir(ctx) if err != nil { return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) @@ -19,17 +27,22 @@ func getSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) { return nil, fmt.Errorf("cannot get list of sync includes: %w", err) } - opts := sync.SyncOptions{ + opts := &sync.SyncOptions{ LocalPath: b.Config.Path, RemotePath: b.Config.Workspace.FilePath, Include: includes, Exclude: b.Config.Sync.Exclude, + Host: b.WorkspaceClient().Config.Host, - Full: false, - CurrentUser: b.Config.Workspace.CurrentUser.User, + Full: false, SnapshotBasePath: cacheDir, WorkspaceClient: b.WorkspaceClient(), } - return sync.New(ctx, opts) + + if b.Config.Workspace.CurrentUser != nil { + opts.CurrentUser = b.Config.Workspace.CurrentUser.User + } + + 
return opts, nil } diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index 26d1ef4b5..4da41e202 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -17,7 +17,7 @@ func (m *upload) Name() string { func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath)) - sync, err := getSync(ctx, b) + sync, err := GetSync(ctx, b) if err != nil { return err } diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go new file mode 100644 index 000000000..ffcadc9d6 --- /dev/null +++ b/bundle/deploy/state.go @@ -0,0 +1,174 @@ +package deploy + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "time" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/fileset" +) + +const DeploymentStateFileName = "deployment.json" +const DeploymentStateVersion = 1 + +type File struct { + LocalPath string `json:"local_path"` + + // If true, this file is a notebook. + // This property must be persisted because notebooks are stripped of their extension. + // If the local file is no longer present, we need to know what to remove on the workspace side. + IsNotebook bool `json:"is_notebook"` +} + +type Filelist []File + +type DeploymentState struct { + // Version is the version of the deployment state. + // To be incremented when the schema changes. + Version int64 `json:"version"` + + // Seq is the sequence number of the deployment state. + // This number is incremented on every deployment. + // It is used to detect if the deployment state is stale. + Seq int64 `json:"seq"` + + // CliVersion is the version of the CLI which created the deployment state. + CliVersion string `json:"cli_version"` + + // Timestamp is the time when the deployment state was created. + Timestamp time.Time `json:"timestamp"` + + // Files is a list of files which has been deployed as part of this deployment. + Files Filelist `json:"files"` +} + +// We use this entry type as a proxy to fs.DirEntry. +// When we construct sync snapshot from deployment state, +// we use a fileset.File which embeds fs.DirEntry as the DirEntry field. +// Because we can't marshal/unmarshal fs.DirEntry directly, instead when we unmarshal +// the deployment state, we use this entry type to represent the fs.DirEntry in fileset.File instance. +type entry struct { + path string + info fs.FileInfo +} + +func newEntry(path string) *entry { + info, err := os.Stat(path) + if err != nil { + return &entry{path, nil} + } + + return &entry{path, info} +} + +func (e *entry) Name() string { + return filepath.Base(e.path) +} + +func (e *entry) IsDir() bool { + // If the entry is nil, it is a non-existent file so return false. + if e.info == nil { + return false + } + return e.info.IsDir() +} + +func (e *entry) Type() fs.FileMode { + // If the entry is nil, it is a non-existent file so return 0. 
+ if e.info == nil { + return 0 + } + return e.info.Mode() +} + +func (e *entry) Info() (fs.FileInfo, error) { + if e.info == nil { + return nil, fmt.Errorf("no info available") + } + return e.info, nil +} + +func FromSlice(files []fileset.File) (Filelist, error) { + var f Filelist + for k := range files { + file := &files[k] + isNotebook, err := file.IsNotebook() + if err != nil { + return nil, err + } + f = append(f, File{ + LocalPath: file.Relative, + IsNotebook: isNotebook, + }) + } + return f, nil +} + +func (f Filelist) ToSlice(basePath string) []fileset.File { + var files []fileset.File + for _, file := range f { + absPath := filepath.Join(basePath, file.LocalPath) + if file.IsNotebook { + files = append(files, fileset.NewNotebookFile(newEntry(absPath), absPath, file.LocalPath)) + } else { + files = append(files, fileset.NewSourceFile(newEntry(absPath), absPath, file.LocalPath)) + } + } + return files +} + +func isLocalStateStale(local io.Reader, remote io.Reader) bool { + localState, err := loadState(local) + if err != nil { + return true + } + + remoteState, err := loadState(remote) + if err != nil { + return false + } + + return localState.Seq < remoteState.Seq +} + +func validateRemoteStateCompatibility(remote io.Reader) error { + state, err := loadState(remote) + if err != nil { + return err + } + + // If the remote state version is greater than the CLI version, we can't proceed. + if state.Version > DeploymentStateVersion { + return fmt.Errorf("remote deployment state is incompatible with the current version of the CLI, please upgrade to at least %s", state.CliVersion) + } + + return nil +} + +func loadState(r io.Reader) (*DeploymentState, error) { + content, err := io.ReadAll(r) + if err != nil { + return nil, err + } + var s DeploymentState + err = json.Unmarshal(content, &s) + if err != nil { + return nil, err + } + + return &s, nil +} + +func getPathToStateFile(ctx context.Context, b *bundle.Bundle) (string, error) { + cacheDir, err := b.CacheDir(ctx) + if err != nil { + return "", fmt.Errorf("cannot get bundle cache directory: %w", err) + } + return filepath.Join(cacheDir, DeploymentStateFileName), nil +} diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go new file mode 100644 index 000000000..089a870cb --- /dev/null +++ b/bundle/deploy/state_pull.go @@ -0,0 +1,126 @@ +package deploy + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "io/fs" + "os" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/sync" +) + +type statePull struct { + filerFactory FilerFactory +} + +func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { + f, err := s.filerFactory(b) + if err != nil { + return err + } + + // Download deployment state file from filer to local cache directory. 
+ log.Infof(ctx, "Opening remote deployment state file") + remote, err := s.remoteState(ctx, f) + if err != nil { + log.Infof(ctx, "Unable to open remote deployment state file: %s", err) + return err + } + if remote == nil { + log.Infof(ctx, "Remote deployment state file does not exist") + return nil + } + + statePath, err := getPathToStateFile(ctx, b) + if err != nil { + return err + } + + local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + return err + } + defer local.Close() + + data := remote.Bytes() + err = validateRemoteStateCompatibility(bytes.NewReader(data)) + if err != nil { + return err + } + + if !isLocalStateStale(local, bytes.NewReader(data)) { + log.Infof(ctx, "Local deployment state is the same or newer, ignoring remote state") + return nil + } + + // Truncating the file before writing + local.Truncate(0) + local.Seek(0, 0) + + // Write file to disk. + log.Infof(ctx, "Writing remote deployment state file to local cache directory") + _, err = io.Copy(local, bytes.NewReader(data)) + if err != nil { + return err + } + + var state DeploymentState + err = json.Unmarshal(data, &state) + if err != nil { + return err + } + + // Create a new snapshot based on the deployment state file. + opts, err := files.GetSyncOptions(ctx, b) + if err != nil { + return err + } + + log.Infof(ctx, "Creating new snapshot") + snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.Config.Path), opts) + if err != nil { + return err + } + + // Persist the snapshot to disk. + log.Infof(ctx, "Persisting snapshot to disk") + return snapshot.Save(ctx) +} + +func (s *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buffer, error) { + // Download deployment state file from filer to local cache directory. + remote, err := f.Read(ctx, DeploymentStateFileName) + if err != nil { + // On first deploy this file doesn't yet exist. 
+ if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + return nil, err + } + + defer remote.Close() + + var buf bytes.Buffer + _, err = io.Copy(&buf, remote) + if err != nil { + return nil, err + } + + return &buf, nil +} + +func (s *statePull) Name() string { + return "deploy:state-pull" +} + +// StatePull returns a mutator that pulls the deployment state from the Databricks workspace +func StatePull() bundle.Mutator { + return &statePull{StateFiler} +} diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go new file mode 100644 index 000000000..50eb90916 --- /dev/null +++ b/bundle/deploy/state_pull_test.go @@ -0,0 +1,457 @@ +package deploy + +import ( + "bytes" + "context" + "encoding/json" + "io" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/deploy/files" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/sync" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type snapshortStateExpectations struct { + localToRemoteNames map[string]string + remoteToLocalNames map[string]string +} + +type statePullExpectations struct { + seq int + filesInDevelopmentState []File + snapshotState *snapshortStateExpectations +} + +type statePullOpts struct { + files []File + seq int + localFiles []string + localNotebooks []string + expects statePullExpectations + withExistingSnapshot bool + localState *DeploymentState +} + +func testStatePull(t *testing.T, opts statePullOpts) { + s := &statePull{func(b *bundle.Bundle) (filer.Filer, error) { + f := mockfiler.NewMockFiler(t) + + deploymentStateData, err := json.Marshal(DeploymentState{ + Version: DeploymentStateVersion, + Seq: int64(opts.seq), + Files: opts.files, + }) + require.NoError(t, err) + + f.EXPECT().Read(mock.Anything, DeploymentStateFileName).Return(io.NopCloser(bytes.NewReader(deploymentStateData)), nil) + + return f, nil + }} + + b := &bundle.Bundle{ + Config: config.Root{ + Path: t.TempDir(), + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "test-user", + }, + }, + }, + }, + } + ctx := context.Background() + + for _, file := range opts.localFiles { + testutil.Touch(t, filepath.Join(b.Config.Path, "bar"), file) + } + + for _, file := range opts.localNotebooks { + testutil.TouchNotebook(t, filepath.Join(b.Config.Path, "bar"), file) + } + + if opts.withExistingSnapshot { + opts, err := files.GetSyncOptions(ctx, b) + require.NoError(t, err) + + snapshotPath, err := sync.SnapshotPath(opts) + require.NoError(t, err) + + err = os.WriteFile(snapshotPath, []byte("snapshot"), 0644) + require.NoError(t, err) + } + + if opts.localState != nil { + statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + data, err := json.Marshal(opts.localState) + require.NoError(t, err) + + err = os.WriteFile(statePath, data, 0644) + require.NoError(t, err) + } + + err := bundle.Apply(ctx, b, s) + require.NoError(t, err) + + // Check that deployment state was written + statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + data, err := os.ReadFile(statePath) + require.NoError(t, err) + + var state DeploymentState + err = json.Unmarshal(data, &state) + 
require.NoError(t, err) + + require.Equal(t, int64(opts.expects.seq), state.Seq) + require.Len(t, state.Files, len(opts.expects.filesInDevelopmentState)) + for i, file := range opts.expects.filesInDevelopmentState { + require.Equal(t, file.LocalPath, state.Files[i].LocalPath) + } + + if opts.expects.snapshotState != nil { + syncOpts, err := files.GetSyncOptions(ctx, b) + require.NoError(t, err) + + snapshotPath, err := sync.SnapshotPath(syncOpts) + require.NoError(t, err) + + _, err = os.Stat(snapshotPath) + require.NoError(t, err) + + data, err = os.ReadFile(snapshotPath) + require.NoError(t, err) + + var snapshot sync.Snapshot + err = json.Unmarshal(data, &snapshot) + require.NoError(t, err) + + snapshotState := snapshot.SnapshotState + require.Len(t, snapshotState.LocalToRemoteNames, len(opts.expects.snapshotState.localToRemoteNames)) + for local, remote := range opts.expects.snapshotState.localToRemoteNames { + require.Equal(t, remote, snapshotState.LocalToRemoteNames[local]) + } + + require.Len(t, snapshotState.RemoteToLocalNames, len(opts.expects.snapshotState.remoteToLocalNames)) + for remote, local := range opts.expects.snapshotState.remoteToLocalNames { + require.Equal(t, local, snapshotState.RemoteToLocalNames[remote]) + } + } +} + +var stateFiles []File = []File{ + { + LocalPath: "bar/t1.py", + IsNotebook: false, + }, + { + LocalPath: "bar/t2.py", + IsNotebook: false, + }, + { + LocalPath: "bar/notebook.py", + IsNotebook: true, + }, +} + +func TestStatePull(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + localNotebooks: []string{"notebook.py"}, + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullSnapshotExists(t *testing.T) { + testStatePull(t, statePullOpts{ + withExistingSnapshot: true, + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullNoState(t *testing.T) { + s := &statePull{func(b *bundle.Bundle) (filer.Filer, error) { + f := mockfiler.NewMockFiler(t) + + f.EXPECT().Read(mock.Anything, DeploymentStateFileName).Return(nil, os.ErrNotExist) + + return f, nil + }} + + b := &bundle.Bundle{ + Config: config.Root{ + Path: t.TempDir(), + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + }, + }, + } + ctx := context.Background() + + err := bundle.Apply(ctx, b, s) + require.NoError(t, err) + + // Check that deployment state was not written + statePath, err := getPathToStateFile(ctx, b) + 
require.NoError(t, err) + + _, err = os.Stat(statePath) + require.True(t, os.IsNotExist(err)) +} + +func TestStatePullOlderState(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + localNotebooks: []string{"notebook.py"}, + localState: &DeploymentState{ + Version: DeploymentStateVersion, + Seq: 2, + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + }, + expects: statePullExpectations{ + seq: 2, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + }, + }) +} + +func TestStatePullNewerState(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + localNotebooks: []string{"notebook.py"}, + localState: &DeploymentState{ + Version: DeploymentStateVersion, + Seq: 0, + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + }, + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullAndFileIsRemovedLocally(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t2.py"}, // t1.py is removed locally + localNotebooks: []string{"notebook.py"}, + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullAndNotebookIsRemovedLocally(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + localNotebooks: []string{}, // notebook.py is removed locally + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullNewerDeploymentStateVersion(t *testing.T) { + s := &statePull{func(b *bundle.Bundle) (filer.Filer, error) { + f := mockfiler.NewMockFiler(t) + + deploymentStateData, err := json.Marshal(DeploymentState{ + Version: DeploymentStateVersion + 1, + Seq: 1, + CliVersion: "1.2.3", + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + }, + }) + require.NoError(t, err) + + f.EXPECT().Read(mock.Anything, DeploymentStateFileName).Return(io.NopCloser(bytes.NewReader(deploymentStateData)), nil) + + return f, nil + }} + + b := 
&bundle.Bundle{ + Config: config.Root{ + Path: t.TempDir(), + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + }, + }, + } + ctx := context.Background() + + err := bundle.Apply(ctx, b, s) + require.Error(t, err) + require.Contains(t, err.Error(), "remote deployment state is incompatible with the current version of the CLI, please upgrade to at least 1.2.3") +} diff --git a/bundle/deploy/state_push.go b/bundle/deploy/state_push.go new file mode 100644 index 000000000..8818d0a73 --- /dev/null +++ b/bundle/deploy/state_push.go @@ -0,0 +1,49 @@ +package deploy + +import ( + "context" + "os" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/log" +) + +type statePush struct { + filerFactory FilerFactory +} + +func (s *statePush) Name() string { + return "deploy:state-push" +} + +func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { + f, err := s.filerFactory(b) + if err != nil { + return err + } + + statePath, err := getPathToStateFile(ctx, b) + if err != nil { + return err + } + + local, err := os.Open(statePath) + if err != nil { + return err + } + defer local.Close() + + log.Infof(ctx, "Writing local deployment state file to remote state directory") + err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists) + if err != nil { + return err + } + + return nil +} + +// StatePush returns a mutator that pushes the deployment state file to Databricks workspace. +func StatePush() bundle.Mutator { + return &statePush{StateFiler} +} diff --git a/bundle/deploy/state_push_test.go b/bundle/deploy/state_push_test.go new file mode 100644 index 000000000..37b865ecb --- /dev/null +++ b/bundle/deploy/state_push_test.go @@ -0,0 +1,82 @@ +package deploy + +import ( + "context" + "encoding/json" + "io" + "os" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestStatePush(t *testing.T) { + s := &statePush{func(b *bundle.Bundle) (filer.Filer, error) { + f := mockfiler.NewMockFiler(t) + + f.EXPECT().Write(mock.Anything, DeploymentStateFileName, mock.MatchedBy(func(r *os.File) bool { + bytes, err := io.ReadAll(r) + if err != nil { + return false + } + + var state DeploymentState + err = json.Unmarshal(bytes, &state) + if err != nil { + return false + } + + if state.Seq != 1 { + return false + } + + if len(state.Files) != 1 { + return false + } + + return true + }), filer.CreateParentDirectories, filer.OverwriteIfExists).Return(nil) + return f, nil + }} + + b := &bundle.Bundle{ + Config: config.Root{ + Path: t.TempDir(), + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + }, + }, + } + + ctx := context.Background() + + statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + state := DeploymentState{ + Version: DeploymentStateVersion, + Seq: 1, + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + } + + data, err := json.Marshal(state) + require.NoError(t, err) + + err = os.WriteFile(statePath, data, 0644) + require.NoError(t, err) + + err = bundle.Apply(ctx, b, s) + require.NoError(t, err) +} diff --git a/bundle/deploy/state_test.go b/bundle/deploy/state_test.go new file mode 100644 index 000000000..15bdc96b4 --- 
/dev/null +++ b/bundle/deploy/state_test.go @@ -0,0 +1,79 @@ +package deploy + +import ( + "bytes" + "encoding/json" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/fileset" + "github.com/stretchr/testify/require" +) + +func TestFromSlice(t *testing.T) { + tmpDir := t.TempDir() + fileset := fileset.New(tmpDir) + testutil.Touch(t, tmpDir, "test1.py") + testutil.Touch(t, tmpDir, "test2.py") + testutil.Touch(t, tmpDir, "test3.py") + + files, err := fileset.All() + require.NoError(t, err) + + f, err := FromSlice(files) + require.NoError(t, err) + require.Len(t, f, 3) + + for _, file := range f { + require.Contains(t, []string{"test1.py", "test2.py", "test3.py"}, file.LocalPath) + } +} + +func TestToSlice(t *testing.T) { + tmpDir := t.TempDir() + fileset := fileset.New(tmpDir) + testutil.Touch(t, tmpDir, "test1.py") + testutil.Touch(t, tmpDir, "test2.py") + testutil.Touch(t, tmpDir, "test3.py") + + files, err := fileset.All() + require.NoError(t, err) + + f, err := FromSlice(files) + require.NoError(t, err) + require.Len(t, f, 3) + + s := f.ToSlice(tmpDir) + require.Len(t, s, 3) + + for _, file := range s { + require.Contains(t, []string{"test1.py", "test2.py", "test3.py"}, file.Name()) + require.Contains(t, []string{ + filepath.Join(tmpDir, "test1.py"), + filepath.Join(tmpDir, "test2.py"), + filepath.Join(tmpDir, "test3.py"), + }, file.Absolute) + require.False(t, file.IsDir()) + require.NotZero(t, file.Type()) + info, err := file.Info() + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, file.Name(), info.Name()) + } +} + +func TestIsLocalStateStale(t *testing.T) { + oldState, err := json.Marshal(DeploymentState{ + Seq: 1, + }) + require.NoError(t, err) + + newState, err := json.Marshal(DeploymentState{ + Seq: 2, + }) + require.NoError(t, err) + + require.True(t, isLocalStateStale(bytes.NewReader(oldState), bytes.NewReader(newState))) + require.False(t, isLocalStateStale(bytes.NewReader(newState), bytes.NewReader(oldState))) +} diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go new file mode 100644 index 000000000..0ae61a6e2 --- /dev/null +++ b/bundle/deploy/state_update.go @@ -0,0 +1,108 @@ +package deploy + +import ( + "bytes" + "context" + "encoding/json" + "io" + "os" + "time" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" + "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/libs/log" +) + +type stateUpdate struct { +} + +func (s *stateUpdate) Name() string { + return "deploy:state-update" +} + +func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) error { + state, err := load(ctx, b) + if err != nil { + return err + } + + // Increment the state sequence. + state.Seq = state.Seq + 1 + + // Update timestamp. + state.Timestamp = time.Now().UTC() + + // Update the CLI version and deployment state version. + state.CliVersion = build.GetInfo().Version + state.Version = DeploymentStateVersion + + // Get the current file list. + sync, err := files.GetSync(ctx, b) + if err != nil { + return err + } + + files, err := sync.GetFileList(ctx) + if err != nil { + return err + } + + // Update the state with the current file list. + fl, err := FromSlice(files) + if err != nil { + return err + } + state.Files = fl + + statePath, err := getPathToStateFile(ctx, b) + if err != nil { + return err + } + // Write the state back to the file. 
+ f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600) + if err != nil { + log.Infof(ctx, "Unable to open deployment state file: %s", err) + return err + } + defer f.Close() + + data, err := json.Marshal(state) + if err != nil { + return err + } + + _, err = io.Copy(f, bytes.NewReader(data)) + if err != nil { + return err + } + + return nil +} + +func StateUpdate() bundle.Mutator { + return &stateUpdate{} +} + +func load(ctx context.Context, b *bundle.Bundle) (*DeploymentState, error) { + // If the file does not exist, return a new DeploymentState. + statePath, err := getPathToStateFile(ctx, b) + if err != nil { + return nil, err + } + + log.Infof(ctx, "Loading deployment state from %s", statePath) + f, err := os.Open(statePath) + if err != nil { + if os.IsNotExist(err) { + log.Infof(ctx, "No deployment state file found") + return &DeploymentState{ + Version: DeploymentStateVersion, + CliVersion: build.GetInfo().Version, + }, nil + } + return nil, err + } + defer f.Close() + return loadState(f) +} diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go new file mode 100644 index 000000000..5e16dd008 --- /dev/null +++ b/bundle/deploy/state_update_test.go @@ -0,0 +1,149 @@ +package deploy + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/internal/testutil" + databrickscfg "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestStateUpdate(t *testing.T) { + s := &stateUpdate{} + + b := &bundle.Bundle{ + Config: config.Root{ + Path: t.TempDir(), + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + FilePath: "/files", + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "test-user", + }, + }, + }, + }, + } + + testutil.Touch(t, b.Config.Path, "test1.py") + testutil.Touch(t, b.Config.Path, "test2.py") + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &databrickscfg.Config{ + Host: "https://test.com", + } + b.SetWorkpaceClient(m.WorkspaceClient) + + wsApi := m.GetMockWorkspaceAPI() + wsApi.EXPECT().GetStatusByPath(mock.Anything, "/files").Return(&workspace.ObjectInfo{ + ObjectType: "DIRECTORY", + }, nil) + + ctx := context.Background() + + err := bundle.Apply(ctx, b, s) + require.NoError(t, err) + + // Check that the state file was updated. + state, err := load(ctx, b) + require.NoError(t, err) + + require.Equal(t, int64(1), state.Seq) + require.Len(t, state.Files, 3) + require.Equal(t, build.GetInfo().Version, state.CliVersion) + + err = bundle.Apply(ctx, b, s) + require.NoError(t, err) + + // Check that the state file was updated again. 
+ state, err = load(ctx, b) + require.NoError(t, err) + + require.Equal(t, int64(2), state.Seq) + require.Len(t, state.Files, 3) + require.Equal(t, build.GetInfo().Version, state.CliVersion) +} + +func TestStateUpdateWithExistingState(t *testing.T) { + s := &stateUpdate{} + + b := &bundle.Bundle{ + Config: config.Root{ + Path: t.TempDir(), + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + FilePath: "/files", + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "test-user", + }, + }, + }, + }, + } + + testutil.Touch(t, b.Config.Path, "test1.py") + testutil.Touch(t, b.Config.Path, "test2.py") + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &databrickscfg.Config{ + Host: "https://test.com", + } + b.SetWorkpaceClient(m.WorkspaceClient) + + wsApi := m.GetMockWorkspaceAPI() + wsApi.EXPECT().GetStatusByPath(mock.Anything, "/files").Return(&workspace.ObjectInfo{ + ObjectType: "DIRECTORY", + }, nil) + + ctx := context.Background() + + // Create an existing state file. + statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + state := &DeploymentState{ + Version: DeploymentStateVersion, + Seq: 10, + CliVersion: build.GetInfo().Version, + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + } + + data, err := json.Marshal(state) + require.NoError(t, err) + + err = os.WriteFile(statePath, data, 0644) + require.NoError(t, err) + + err = bundle.Apply(ctx, b, s) + require.NoError(t, err) + + // Check that the state file was updated. + state, err = load(ctx, b) + require.NoError(t, err) + + require.Equal(t, int64(11), state.Seq) + require.Len(t, state.Files, 3) + require.Equal(t, build.GetInfo().Version, state.CliVersion) +} diff --git a/bundle/deploy/terraform/filer.go b/bundle/deploy/terraform/filer.go deleted file mode 100644 index b1fa5a1bd..000000000 --- a/bundle/deploy/terraform/filer.go +++ /dev/null @@ -1,14 +0,0 @@ -package terraform - -import ( - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/libs/filer" -) - -// filerFunc is a function that returns a filer.Filer. -type filerFunc func(b *bundle.Bundle) (filer.Filer, error) - -// stateFiler returns a filer.Filer that can be used to read/write state files. 
-func stateFiler(b *bundle.Bundle) (filer.Filer, error) { - return filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) -} diff --git a/bundle/deploy/terraform/state_pull.go b/bundle/deploy/terraform/state_pull.go index 14e8ecf12..045222ae0 100644 --- a/bundle/deploy/terraform/state_pull.go +++ b/bundle/deploy/terraform/state_pull.go @@ -10,12 +10,13 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) type statePull struct { - filerFunc + filerFactory deploy.FilerFactory } func (l *statePull) Name() string { @@ -45,7 +46,7 @@ func (l *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buff } func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { - f, err := l.filerFunc(b) + f, err := l.filerFactory(b) if err != nil { return err } @@ -94,5 +95,5 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { } func StatePull() bundle.Mutator { - return &statePull{stateFiler} + return &statePull{deploy.StateFiler} } diff --git a/bundle/deploy/terraform/state_push.go b/bundle/deploy/terraform/state_push.go index a51403295..f701db87d 100644 --- a/bundle/deploy/terraform/state_push.go +++ b/bundle/deploy/terraform/state_push.go @@ -6,13 +6,14 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) type statePush struct { - filerFunc + filerFactory deploy.FilerFactory } func (l *statePush) Name() string { @@ -20,7 +21,7 @@ func (l *statePush) Name() string { } func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { - f, err := l.filerFunc(b) + f, err := l.filerFactory(b) if err != nil { return err } @@ -49,5 +50,5 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { } func StatePush() bundle.Mutator { - return &statePush{stateFiler} + return &statePush{deploy.StateFiler} } diff --git a/bundle/deploy/terraform/state_test.go b/bundle/deploy/terraform/state_test.go index ee15b953b..ff3250625 100644 --- a/bundle/deploy/terraform/state_test.go +++ b/bundle/deploy/terraform/state_test.go @@ -8,12 +8,13 @@ import ( "testing" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/require" ) -// identityFiler returns a filerFunc that returns the specified filer. -func identityFiler(f filer.Filer) filerFunc { +// identityFiler returns a FilerFactory that returns the specified filer. 
+func identityFiler(f filer.Filer) deploy.FilerFactory { return func(_ *bundle.Bundle) (filer.Filer, error) { return f, nil } diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 5c6575509..f266a98f8 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -24,6 +24,7 @@ func Deploy() bundle.Mutator { bundle.Defer( bundle.Seq( terraform.StatePull(), + deploy.StatePull(), deploy.CheckRunningResource(), mutator.ValidateGitDetails(), libraries.MatchWithArtifacts(), @@ -31,6 +32,7 @@ func Deploy() bundle.Mutator { artifacts.UploadAll(), python.TransformWheelTask(), files.Upload(), + deploy.StateUpdate(), permissions.ApplyWorkspaceRootPermissions(), terraform.Interpolate(), terraform.Write(), @@ -38,6 +40,7 @@ func Deploy() bundle.Mutator { terraform.Apply(), bundle.Seq( terraform.StatePush(), + deploy.StatePush(), terraform.Load(), metadata.Compute(), metadata.Upload(), diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index 20ec2fcd3..0b7ab4473 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -5,6 +5,7 @@ import ( "time" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" @@ -20,28 +21,14 @@ type syncFlags struct { } func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) { - cacheDir, err := b.CacheDir(cmd.Context()) + opts, err := files.GetSyncOptions(cmd.Context(), b) if err != nil { - return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) + return nil, fmt.Errorf("cannot get sync options: %w", err) } - includes, err := b.GetSyncIncludePatterns(cmd.Context()) - if err != nil { - return nil, fmt.Errorf("cannot get list of sync includes: %w", err) - } - - opts := sync.SyncOptions{ - LocalPath: b.Config.Path, - RemotePath: b.Config.Workspace.FilePath, - Include: includes, - Exclude: b.Config.Sync.Exclude, - Full: f.full, - PollInterval: f.interval, - - SnapshotBasePath: cacheDir, - WorkspaceClient: b.WorkspaceClient(), - } - return &opts, nil + opts.Full = f.full + opts.PollInterval = f.interval + return opts, nil } func newSyncCommand() *cobra.Command { diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index f08d3d61a..6899d6fe1 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -10,6 +10,7 @@ import ( "time" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/sync" @@ -29,28 +30,14 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b * return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle") } - cacheDir, err := b.CacheDir(cmd.Context()) + opts, err := files.GetSyncOptions(cmd.Context(), b) if err != nil { - return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) + return nil, fmt.Errorf("cannot get sync options: %w", err) } - includes, err := b.GetSyncIncludePatterns(cmd.Context()) - if err != nil { - return nil, fmt.Errorf("cannot get list of sync includes: %w", err) - } - - opts := sync.SyncOptions{ - LocalPath: b.Config.Path, - RemotePath: b.Config.Workspace.FilePath, - Include: includes, - Exclude: b.Config.Sync.Exclude, - Full: f.full, - PollInterval: f.interval, - - SnapshotBasePath: cacheDir, - WorkspaceClient: b.WorkspaceClient(), - } - return &opts, nil + opts.Full = f.full + opts.PollInterval = f.interval + 
return opts, nil } func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*sync.SyncOptions, error) { diff --git a/internal/bundle/deployment_state_test.go b/internal/bundle/deployment_state_test.go new file mode 100644 index 000000000..25f36d4a2 --- /dev/null +++ b/internal/bundle/deployment_state_test.go @@ -0,0 +1,102 @@ +package bundle + +import ( + "os" + "path" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle/deploy" + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + nodeTypeId := internal.GetNodeTypeId(env) + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "spark_version": "13.3.x-scala2.12", + "node_type_id": nodeTypeId, + }) + require.NoError(t, err) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + + // Add some test file to the bundle + err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0644) + require.NoError(t, err) + + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0644) + require.NoError(t, err) + + // Add notebook to the bundle + err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0644) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + t.Cleanup(func() { + destroyBundle(t, ctx, bundleRoot) + }) + + remoteRoot := getBundleRemoteRootPath(w, t, uniqueId) + + // Check that test file is in workspace + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py")) + require.NoError(t, err) + + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test_to_modify.py")) + require.NoError(t, err) + + // Check that notebook is in workspace + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "notebook")) + require.NoError(t, err) + + // Check that deployment.json is synced correctly + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "state", deploy.DeploymentStateFileName)) + require.NoError(t, err) + + // Remove .databricks directory to simulate a fresh deployment like in CI/CD environment + err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) + require.NoError(t, err) + + // Remove the file from the bundle + err = os.Remove(filepath.Join(bundleRoot, "test.py")) + require.NoError(t, err) + + // Remove the notebook from the bundle and deploy again + err = os.Remove(filepath.Join(bundleRoot, "notebook.py")) + require.NoError(t, err) + + // Modify the content of another file + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Modified!')"), 0644) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // Check that removed file is not in workspace anymore + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py")) + require.ErrorContains(t, err, "files/test.py") + require.ErrorContains(t, err, "doesn't exist") + + // Check that removed notebook is not in workspace anymore + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "notebook")) + require.ErrorContains(t, err, 
"files/notebook") + require.ErrorContains(t, err, "doesn't exist") + + // Check the content of modified file + content, err := w.Workspace.ReadFile(ctx, path.Join(remoteRoot, "files", "test_to_modify.py")) + require.NoError(t, err) + require.Equal(t, "print('Modified!')", string(content)) +} diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index a8fbd230e..10e315bde 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -3,6 +3,7 @@ package bundle import ( "context" "encoding/json" + "fmt" "os" "path/filepath" "strings" @@ -13,6 +14,8 @@ import ( "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/template" + "github.com/databricks/databricks-sdk-go" + "github.com/stretchr/testify/require" ) func initTestTemplate(t *testing.T, ctx context.Context, templateName string, config map[string]any) (string, error) { @@ -78,3 +81,11 @@ func destroyBundle(t *testing.T, ctx context.Context, path string) error { _, _, err := c.Run() return err } + +func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, uniqueId string) string { + // Compute root path for the bundle deployment + me, err := w.CurrentUser.Me(context.Background()) + require.NoError(t, err) + root := fmt.Sprintf("/Users/%s/.bundle/%s", me.UserName, uniqueId) + return root +} diff --git a/internal/bundle/job_metadata_test.go b/internal/bundle/job_metadata_test.go index 0d8a431e4..cb3ad0818 100644 --- a/internal/bundle/job_metadata_test.go +++ b/internal/bundle/job_metadata_test.go @@ -3,7 +3,6 @@ package bundle import ( "context" "encoding/json" - "fmt" "io" "path" "strconv" @@ -56,9 +55,7 @@ func TestAccJobsMetadataFile(t *testing.T) { assert.Equal(t, job2.Settings.Name, jobName) // Compute root path for the bundle deployment - me, err := w.CurrentUser.Me(context.Background()) - require.NoError(t, err) - root := fmt.Sprintf("/Users/%s/.bundle/%s", me.UserName, uniqueId) + root := getBundleRemoteRootPath(w, t, uniqueId) f, err := filer.NewWorkspaceFilesClient(w, root) require.NoError(t, err) diff --git a/internal/testutil/helpers.go b/internal/testutil/helpers.go new file mode 100644 index 000000000..853cc16cc --- /dev/null +++ b/internal/testutil/helpers.go @@ -0,0 +1,26 @@ +package testutil + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TouchNotebook(t *testing.T, path, file string) { + os.MkdirAll(path, 0755) + f, err := os.Create(filepath.Join(path, file)) + require.NoError(t, err) + + err = os.WriteFile(filepath.Join(path, file), []byte("# Databricks notebook source"), 0644) + require.NoError(t, err) + f.Close() +} + +func Touch(t *testing.T, path, file string) { + os.MkdirAll(path, 0755) + f, err := os.Create(filepath.Join(path, file)) + require.NoError(t, err) + f.Close() +} diff --git a/libs/fileset/file.go b/libs/fileset/file.go index 6594de4ed..17cae7952 100644 --- a/libs/fileset/file.go +++ b/libs/fileset/file.go @@ -3,11 +3,49 @@ package fileset import ( "io/fs" "time" + + "github.com/databricks/cli/libs/notebook" +) + +type fileType int + +const ( + Unknown fileType = iota + Notebook // Databricks notebook file + Source // Any other file type ) type File struct { fs.DirEntry Absolute, Relative string + fileType fileType +} + +func NewNotebookFile(entry fs.DirEntry, absolute string, relative string) File { + return File{ + DirEntry: entry, + Absolute: absolute, + Relative: relative, + fileType: Notebook, + } +} + +func NewFile(entry fs.DirEntry, absolute 
string, relative string) File { + return File{ + DirEntry: entry, + Absolute: absolute, + Relative: relative, + fileType: Unknown, + } +} + +func NewSourceFile(entry fs.DirEntry, absolute string, relative string) File { + return File{ + DirEntry: entry, + Absolute: absolute, + Relative: relative, + fileType: Source, + } } func (f File) Modified() (ts time.Time) { @@ -18,3 +56,21 @@ func (f File) Modified() (ts time.Time) { } return info.ModTime() } + +func (f *File) IsNotebook() (bool, error) { + if f.fileType != Unknown { + return f.fileType == Notebook, nil + } + + // Otherwise, detect the notebook type. + isNotebook, _, err := notebook.Detect(f.Absolute) + if err != nil { + return false, err + } + if isNotebook { + f.fileType = Notebook + } else { + f.fileType = Source + } + return isNotebook, nil +} diff --git a/libs/fileset/file_test.go b/libs/fileset/file_test.go new file mode 100644 index 000000000..4adcb1c56 --- /dev/null +++ b/libs/fileset/file_test.go @@ -0,0 +1,39 @@ +package fileset + +import ( + "path/filepath" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/require" +) + +func TestNotebookFileIsNotebook(t *testing.T) { + f := NewNotebookFile(nil, "", "") + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.True(t, isNotebook) +} + +func TestSourceFileIsNotNotebook(t *testing.T) { + f := NewSourceFile(nil, "", "") + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.False(t, isNotebook) +} + +func TestUnknownFileDetectsNotebook(t *testing.T) { + tmpDir := t.TempDir() + testutil.Touch(t, tmpDir, "test.py") + testutil.TouchNotebook(t, tmpDir, "notebook.py") + + f := NewFile(nil, filepath.Join(tmpDir, "test.py"), "test.py") + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.False(t, isNotebook) + + f = NewFile(nil, filepath.Join(tmpDir, "notebook.py"), "notebook.py") + isNotebook, err = f.IsNotebook() + require.NoError(t, err) + require.True(t, isNotebook) +} diff --git a/libs/fileset/fileset.go b/libs/fileset/fileset.go index 81b85525c..52463dff3 100644 --- a/libs/fileset/fileset.go +++ b/libs/fileset/fileset.go @@ -84,7 +84,7 @@ func (w *FileSet) recursiveListFiles() (fileList []File, err error) { return nil } - fileList = append(fileList, File{d, path, relPath}) + fileList = append(fileList, NewFile(d, path, relPath)) return nil }) return diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index f9956962e..06b4d13bc 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -53,6 +53,30 @@ type Snapshot struct { const syncSnapshotDirName = "sync-snapshots" +func NewSnapshot(localFiles []fileset.File, opts *SyncOptions) (*Snapshot, error) { + snapshotPath, err := SnapshotPath(opts) + if err != nil { + return nil, err + } + + snapshotState, err := NewSnapshotState(localFiles) + if err != nil { + return nil, err + } + + // Reset last modified times to make sure all files are synced + snapshotState.ResetLastModifiedTimes() + + return &Snapshot{ + SnapshotPath: snapshotPath, + New: true, + Version: LatestSnapshotVersion, + Host: opts.Host, + RemotePath: opts.RemotePath, + SnapshotState: snapshotState, + }, nil +} + func GetFileName(host, remotePath string) string { hash := md5.Sum([]byte(host + remotePath)) hashString := hex.EncodeToString(hash[:]) diff --git a/libs/sync/snapshot_state.go b/libs/sync/snapshot_state.go index 575063521..10cd34e6d 100644 --- a/libs/sync/snapshot_state.go +++ b/libs/sync/snapshot_state.go @@ -7,7 +7,6 @@ import ( "time" 
"github.com/databricks/cli/libs/fileset" - "github.com/databricks/cli/libs/notebook" ) // SnapshotState keeps track of files on the local filesystem and their corresponding @@ -46,10 +45,12 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { } // Compute the new state. - for _, f := range localFiles { + for k := range localFiles { + f := &localFiles[k] // Compute the remote name the file will have in WSFS remoteName := filepath.ToSlash(f.Relative) - isNotebook, _, err := notebook.Detect(f.Absolute) + isNotebook, err := f.IsNotebook() + if err != nil { // Ignore this file if we're unable to determine the notebook type. // Trying to upload such a file to the workspace would fail anyway. @@ -72,6 +73,12 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { return fs, nil } +func (fs *SnapshotState) ResetLastModifiedTimes() { + for k := range fs.LastModifiedTimes { + fs.LastModifiedTimes[k] = time.Unix(0, 0) + } +} + // Consistency checks for the sync files state representation. These are invariants // that downstream code for computing changes to apply to WSFS depends on. // diff --git a/libs/sync/sync.go b/libs/sync/sync.go index beb3f6a33..78faa0c8f 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -151,7 +151,7 @@ func (s *Sync) notifyComplete(ctx context.Context, d diff) { } func (s *Sync) RunOnce(ctx context.Context) error { - files, err := getFileList(ctx, s) + files, err := s.GetFileList(ctx) if err != nil { return err } @@ -182,7 +182,7 @@ func (s *Sync) RunOnce(ctx context.Context) error { return nil } -func getFileList(ctx context.Context, s *Sync) ([]fileset.File, error) { +func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { // tradeoff: doing portable monitoring only due to macOS max descriptor manual ulimit setting requirement // https://github.com/gorakhargosh/watchdog/blob/master/src/watchdog/observers/kqueue.py#L394-L418 all := set.NewSetF(func(f fileset.File) string { diff --git a/libs/sync/sync_test.go b/libs/sync/sync_test.go index 0f1ad61ba..dc220dbf7 100644 --- a/libs/sync/sync_test.go +++ b/libs/sync/sync_test.go @@ -93,7 +93,7 @@ func TestGetFileSet(t *testing.T) { excludeFileSet: excl, } - fileList, err := getFileList(ctx, s) + fileList, err := s.GetFileList(ctx) require.NoError(t, err) require.Equal(t, len(fileList), 9) @@ -111,7 +111,7 @@ func TestGetFileSet(t *testing.T) { excludeFileSet: excl, } - fileList, err = getFileList(ctx, s) + fileList, err = s.GetFileList(ctx) require.NoError(t, err) require.Equal(t, len(fileList), 1) @@ -129,7 +129,7 @@ func TestGetFileSet(t *testing.T) { excludeFileSet: excl, } - fileList, err = getFileList(ctx, s) + fileList, err = s.GetFileList(ctx) require.NoError(t, err) require.Equal(t, len(fileList), 10) } @@ -158,7 +158,7 @@ func TestRecursiveExclude(t *testing.T) { excludeFileSet: excl, } - fileList, err := getFileList(ctx, s) + fileList, err := s.GetFileList(ctx) require.NoError(t, err) require.Equal(t, len(fileList), 7) } From d216404f278b52f725db5894c1146be0f52f8b98 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 18 Mar 2024 16:39:18 +0100 Subject: [PATCH 085/286] Do CheckRunningResource only after terraform.Write (#1292) ## Changes CheckRunningResource does `terraform.Show` which (I believe) expects valid `bundle.tf.json` which is only written as part of `terraform.Write` later. With this PR order is changed. 
Fixes #1286 ## Tests Added regression E2E test --- bundle/phases/deploy.go | 2 +- internal/bundle/basic_test.go | 43 +++++++++++++++++++++++++++++++++++ internal/bundle/helpers.go | 9 ++++++++ 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 internal/bundle/basic_test.go diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index f266a98f8..52515a7ea 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -25,7 +25,6 @@ func Deploy() bundle.Mutator { bundle.Seq( terraform.StatePull(), deploy.StatePull(), - deploy.CheckRunningResource(), mutator.ValidateGitDetails(), libraries.MatchWithArtifacts(), artifacts.CleanUp(), @@ -36,6 +35,7 @@ func Deploy() bundle.Mutator { permissions.ApplyWorkspaceRootPermissions(), terraform.Interpolate(), terraform.Write(), + deploy.CheckRunningResource(), bundle.Defer( terraform.Apply(), bundle.Seq( diff --git a/internal/bundle/basic_test.go b/internal/bundle/basic_test.go new file mode 100644 index 000000000..6eb3d10fb --- /dev/null +++ b/internal/bundle/basic_test.go @@ -0,0 +1,43 @@ +package bundle + +import ( + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { + ctx, _ := acc.WorkspaceTest(t) + + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + uniqueId := uuid.New().String() + root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": "13.2.x-snapshot-scala2.12", + }) + require.NoError(t, err) + + t.Cleanup(func() { + err = destroyBundle(t, ctx, root) + require.NoError(t, err) + }) + + // deploy empty bundle + err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) + require.NoError(t, err) + + // Remove .databricks directory to simulate a fresh deployment + err = os.RemoveAll(filepath.Join(root, ".databricks")) + require.NoError(t, err) + + // deploy empty bundle again + err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) + require.NoError(t, err) +} diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 10e315bde..c73d6ad03 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -56,6 +56,15 @@ func deployBundle(t *testing.T, ctx context.Context, path string) error { return err } +func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags []string) error { + t.Setenv("BUNDLE_ROOT", path) + args := []string{"bundle", "deploy", "--force-lock"} + args = append(args, flags...) + c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) + _, _, err := c.Run() + return err +} + func runResource(t *testing.T, ctx context.Context, path string, key string) (string, error) { ctx = cmdio.NewContext(ctx, cmdio.Default()) From 7c4b34945cbe24196e7cbf4f9b5277c92dc3a3e9 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 18 Mar 2024 17:23:39 +0100 Subject: [PATCH 086/286] Rewrite relative paths using `dyn.Location` of the underlying value (#1273) ## Changes This change addresses the path resolution behavior in resource definitions. Previously, all paths were resolved relative to where the resource was first defined, which could lead to confusion and errors when paths were specified in different directories. 
The new behavior is to resolve paths relative to where they are defined, making it more intuitive. However, to avoid breaking existing configurations, compatibility with the old behavior is maintained. ## Tests * Existing unit tests for path translation pass. * Additional test to cover both the nominal and the fallback behavior. --- bundle/config/mutator/translate_paths.go | 79 ++++---- .../mutator/translate_paths_artifacts.go | 53 ++--- bundle/config/mutator/translate_paths_jobs.go | 187 ++++++++---------- .../mutator/translate_paths_pipelines.go | 82 ++++---- bundle/config/mutator/translate_paths_test.go | 8 +- bundle/tests/bundle/loader.go | 26 +++ .../bundle/pipeline_glob_paths/databricks.yml | 16 +- .../tests/bundle/pipeline_glob_paths_test.go | 37 ++-- .../tests/path_translation/fallback/README.md | 42 ++++ .../path_translation/fallback/databricks.yml | 13 ++ .../fallback/override_job.yml | 41 ++++ .../fallback/override_pipeline.yml | 13 ++ .../fallback/resources/my_job.yml | 36 ++++ .../fallback/resources/my_pipeline.yml | 9 + .../fallback/src/dbt_project/.gitkeep | 0 .../path_translation/fallback/src/file.py | 1 + .../path_translation/fallback/src/file1.py | 1 + .../path_translation/fallback/src/file2.py | 1 + .../path_translation/fallback/src/notebook.py | 2 + .../fallback/src/notebook1.py | 2 + .../fallback/src/notebook2.py | 2 + .../path_translation/fallback/src/sql.sql | 1 + .../tests/path_translation/nominal/README.md | 6 + .../path_translation/nominal/databricks.yml | 13 ++ .../path_translation/nominal/override_job.yml | 53 +++++ .../nominal/override_pipeline.yml | 13 ++ .../nominal/resources/my_job.yml | 48 +++++ .../nominal/resources/my_pipeline.yml | 9 + .../nominal/src/dbt_project/.gitkeep | 0 .../path_translation/nominal/src/file.py | 1 + .../path_translation/nominal/src/file1.py | 1 + .../path_translation/nominal/src/file2.py | 1 + .../path_translation/nominal/src/notebook.py | 2 + .../path_translation/nominal/src/notebook1.py | 2 + .../path_translation/nominal/src/notebook2.py | 2 + .../path_translation/nominal/src/sql.sql | 1 + bundle/tests/path_translation_test.go | 112 +++++++++++ libs/dyn/pattern.go | 8 + libs/dyn/pattern_test.go | 15 ++ 39 files changed, 706 insertions(+), 233 deletions(-) create mode 100644 bundle/tests/bundle/loader.go create mode 100644 bundle/tests/path_translation/fallback/README.md create mode 100644 bundle/tests/path_translation/fallback/databricks.yml create mode 100644 bundle/tests/path_translation/fallback/override_job.yml create mode 100644 bundle/tests/path_translation/fallback/override_pipeline.yml create mode 100644 bundle/tests/path_translation/fallback/resources/my_job.yml create mode 100644 bundle/tests/path_translation/fallback/resources/my_pipeline.yml create mode 100644 bundle/tests/path_translation/fallback/src/dbt_project/.gitkeep create mode 100644 bundle/tests/path_translation/fallback/src/file.py create mode 100644 bundle/tests/path_translation/fallback/src/file1.py create mode 100644 bundle/tests/path_translation/fallback/src/file2.py create mode 100644 bundle/tests/path_translation/fallback/src/notebook.py create mode 100644 bundle/tests/path_translation/fallback/src/notebook1.py create mode 100644 bundle/tests/path_translation/fallback/src/notebook2.py create mode 100644 bundle/tests/path_translation/fallback/src/sql.sql create mode 100644 bundle/tests/path_translation/nominal/README.md create mode 100644 bundle/tests/path_translation/nominal/databricks.yml create mode 100644 
bundle/tests/path_translation/nominal/override_job.yml create mode 100644 bundle/tests/path_translation/nominal/override_pipeline.yml create mode 100644 bundle/tests/path_translation/nominal/resources/my_job.yml create mode 100644 bundle/tests/path_translation/nominal/resources/my_pipeline.yml create mode 100644 bundle/tests/path_translation/nominal/src/dbt_project/.gitkeep create mode 100644 bundle/tests/path_translation/nominal/src/file.py create mode 100644 bundle/tests/path_translation/nominal/src/file1.py create mode 100644 bundle/tests/path_translation/nominal/src/file2.py create mode 100644 bundle/tests/path_translation/nominal/src/notebook.py create mode 100644 bundle/tests/path_translation/nominal/src/notebook1.py create mode 100644 bundle/tests/path_translation/nominal/src/notebook2.py create mode 100644 bundle/tests/path_translation/nominal/src/sql.sql create mode 100644 bundle/tests/path_translation_test.go diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index b4a17afc7..ac1da5bf2 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/notebook" ) @@ -150,55 +151,55 @@ func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (str return localRelPath, nil } -type transformer struct { - // A directory path relative to which `path` will be transformed - dir string - // A path to transform - path *string - // Name of the config property where the path string is coming from - configPath string - // A function that performs the actual rewriting logic. - fn rewriteFunc +func (m *translatePaths) rewriteValue(b *bundle.Bundle, p dyn.Path, v dyn.Value, fn rewriteFunc, dir string) (dyn.Value, error) { + out := v.MustString() + err := m.rewritePath(dir, b, &out, fn) + if err != nil { + if target := (&ErrIsNotebook{}); errors.As(err, target) { + return dyn.InvalidValue, fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, p, target) + } + if target := (&ErrIsNotNotebook{}); errors.As(err, target) { + return dyn.InvalidValue, fmt.Errorf(`expected a notebook for "%s" but got a file: %w`, p, target) + } + return dyn.InvalidValue, err + } + + return dyn.NewValue(out, v.Location()), nil } -type transformFunc func(resource any, dir string) *transformer +func (m *translatePaths) rewriteRelativeTo(b *bundle.Bundle, p dyn.Path, v dyn.Value, fn rewriteFunc, dir, fallback string) (dyn.Value, error) { + nv, err := m.rewriteValue(b, p, v, fn, dir) + if err == nil { + return nv, nil + } -// Apply all matches transformers for the given resource -func (m *translatePaths) applyTransformers(funcs []transformFunc, b *bundle.Bundle, resource any, dir string) error { - for _, transformFn := range funcs { - transformer := transformFn(resource, dir) - if transformer == nil { - continue - } - - err := m.rewritePath(transformer.dir, b, transformer.path, transformer.fn) - if err != nil { - if target := (&ErrIsNotebook{}); errors.As(err, target) { - return fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, transformer.configPath, target) - } - if target := (&ErrIsNotNotebook{}); errors.As(err, target) { - return fmt.Errorf(`expected a notebook for "%s" but got a file: %w`, transformer.configPath, target) - } - return err + // If we failed to rewrite the path, try to rewrite it relative to the fallback directory. 
+ if fallback != "" { + nv, nerr := m.rewriteValue(b, p, v, fn, fallback) + if nerr == nil { + // TODO: Emit a warning that this path should be rewritten. + return nv, nil } } - return nil + return dyn.InvalidValue, err } func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error { m.seen = make(map[string]string) - for _, fn := range []func(*translatePaths, *bundle.Bundle) error{ - applyJobTransformers, - applyPipelineTransformers, - applyArtifactTransformers, - } { - err := fn(m, b) - if err != nil { - return err + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var err error + for _, fn := range []func(*bundle.Bundle, dyn.Value) (dyn.Value, error){ + m.applyJobTranslations, + m.applyPipelineTranslations, + m.applyArtifactTranslations, + } { + v, err = fn(b, v) + if err != nil { + return dyn.InvalidValue, err + } } - } - - return nil + return v, nil + }) } diff --git a/bundle/config/mutator/translate_paths_artifacts.go b/bundle/config/mutator/translate_paths_artifacts.go index 91e8397cb..7bda04eec 100644 --- a/bundle/config/mutator/translate_paths_artifacts.go +++ b/bundle/config/mutator/translate_paths_artifacts.go @@ -4,39 +4,40 @@ import ( "fmt" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/dyn" ) -func transformArtifactPath(resource any, dir string) *transformer { - artifact, ok := resource.(*config.Artifact) - if !ok { - return nil - } +func (m *translatePaths) applyArtifactTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { + var err error - return &transformer{ - dir, - &artifact.Path, - "artifacts.path", - translateNoOp, - } -} + // Base pattern to match all artifacts. + base := dyn.NewPattern( + dyn.Key("artifacts"), + dyn.AnyKey(), + ) -func applyArtifactTransformers(m *translatePaths, b *bundle.Bundle) error { - artifactTransformers := []transformFunc{ - transformArtifactPath, - } + for _, t := range []struct { + pattern dyn.Pattern + fn rewriteFunc + }{ + { + base.Append(dyn.Key("path")), + translateNoOp, + }, + } { + v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[1].Key() + dir, err := v.Location().Directory() + if err != nil { + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for artifact %s: %w", key, err) + } - for key, artifact := range b.Config.Artifacts { - dir, err := artifact.ConfigFileDirectory() + return m.rewriteRelativeTo(b, p, v, t.fn, dir, "") + }) if err != nil { - return fmt.Errorf("unable to determine directory for artifact %s: %w", key, err) - } - - err = m.applyTransformers(artifactTransformers, b, artifact, dir) - if err != nil { - return err + return dyn.InvalidValue, err } } - return nil + return v, nil } diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index d920c2209..e761bda09 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -2,132 +2,101 @@ package mutator import ( "fmt" + "slices" "github.com/databricks/cli/bundle" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/cli/libs/dyn" ) -func transformNotebookTask(resource any, dir string) *transformer { - task, ok := resource.(*jobs.Task) - if !ok || task.NotebookTask == nil { - return nil - } +type jobTaskRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc +} - return &transformer{ - dir, - 
&task.NotebookTask.NotebookPath, - "tasks.notebook_task.notebook_path", - translateNotebookPath, +func rewritePatterns(base dyn.Pattern) []jobTaskRewritePattern { + return []jobTaskRewritePattern{ + { + base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")), + translateNotebookPath, + }, + { + base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")), + translateFilePath, + }, + { + base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")), + translateDirectoryPath, + }, + { + base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")), + translateFilePath, + }, + { + base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")), + translateNoOp, + }, + { + base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")), + translateNoOp, + }, } } -func transformSparkTask(resource any, dir string) *transformer { - task, ok := resource.(*jobs.Task) - if !ok || task.SparkPythonTask == nil { - return nil - } - - return &transformer{ - dir, - &task.SparkPythonTask.PythonFile, - "tasks.spark_python_task.python_file", - translateFilePath, - } -} - -func transformWhlLibrary(resource any, dir string) *transformer { - library, ok := resource.(*compute.Library) - if !ok || library.Whl == "" { - return nil - } - - return &transformer{ - dir, - &library.Whl, - "libraries.whl", - translateNoOp, // Does not convert to remote path but makes sure that nested paths resolved correctly - } -} - -func transformDbtTask(resource any, dir string) *transformer { - task, ok := resource.(*jobs.Task) - if !ok || task.DbtTask == nil { - return nil - } - - return &transformer{ - dir, - &task.DbtTask.ProjectDirectory, - "tasks.dbt_task.project_directory", - translateDirectoryPath, - } -} - -func transformSqlFileTask(resource any, dir string) *transformer { - task, ok := resource.(*jobs.Task) - if !ok || task.SqlTask == nil || task.SqlTask.File == nil { - return nil - } - - return &transformer{ - dir, - &task.SqlTask.File.Path, - "tasks.sql_task.file.path", - translateFilePath, - } -} - -func transformJarLibrary(resource any, dir string) *transformer { - library, ok := resource.(*compute.Library) - if !ok || library.Jar == "" { - return nil - } - - return &transformer{ - dir, - &library.Jar, - "libraries.jar", - translateNoOp, // Does not convert to remote path but makes sure that nested paths resolved correctly - } -} - -func applyJobTransformers(m *translatePaths, b *bundle.Bundle) error { - jobTransformers := []transformFunc{ - transformNotebookTask, - transformSparkTask, - transformWhlLibrary, - transformJarLibrary, - transformDbtTask, - transformSqlFileTask, - } +func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { + var fallback = make(map[string]string) + var ignore []string + var err error for key, job := range b.Config.Resources.Jobs { dir, err := job.ConfigFileDirectory() if err != nil { - return fmt.Errorf("unable to determine directory for job %s: %w", key, err) + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err) } + // If we cannot resolve the relative path using the [dyn.Value] location itself, + // use the job's location as fallback. This is necessary for backwards compatibility. 
+ fallback[key] = dir + // Do not translate job task paths if using git source if job.GitSource != nil { - continue - } - - for i := 0; i < len(job.Tasks); i++ { - task := &job.Tasks[i] - err := m.applyTransformers(jobTransformers, b, task, dir) - if err != nil { - return err - } - for j := 0; j < len(task.Libraries); j++ { - library := &task.Libraries[j] - err := m.applyTransformers(jobTransformers, b, library, dir) - if err != nil { - return err - } - } + ignore = append(ignore, key) } } - return nil + // Base pattern to match all tasks in all jobs. + base := dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("jobs"), + dyn.AnyKey(), + dyn.Key("tasks"), + dyn.AnyIndex(), + ) + + // Compile list of patterns and their respective rewrite functions. + taskPatterns := rewritePatterns(base) + forEachPatterns := rewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task"))) + allPatterns := append(taskPatterns, forEachPatterns...) + + for _, t := range allPatterns { + v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[2].Key() + + // Skip path translation if the job is using git source. + if slices.Contains(ignore, key) { + return v, nil + } + + dir, err := v.Location().Directory() + if err != nil { + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err) + } + + return m.rewriteRelativeTo(b, p, v, t.fn, dir, fallback[key]) + }) + if err != nil { + return dyn.InvalidValue, err + } + } + + return v, nil } diff --git a/bundle/config/mutator/translate_paths_pipelines.go b/bundle/config/mutator/translate_paths_pipelines.go index 1afdb9d51..caec4198e 100644 --- a/bundle/config/mutator/translate_paths_pipelines.go +++ b/bundle/config/mutator/translate_paths_pipelines.go @@ -4,57 +4,59 @@ import ( "fmt" "github.com/databricks/cli/bundle" - "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/cli/libs/dyn" ) -func transformLibraryNotebook(resource any, dir string) *transformer { - library, ok := resource.(*pipelines.PipelineLibrary) - if !ok || library.Notebook == nil { - return nil - } - - return &transformer{ - dir, - &library.Notebook.Path, - "libraries.notebook.path", - translateNotebookPath, - } -} - -func transformLibraryFile(resource any, dir string) *transformer { - library, ok := resource.(*pipelines.PipelineLibrary) - if !ok || library.File == nil { - return nil - } - - return &transformer{ - dir, - &library.File.Path, - "libraries.file.path", - translateFilePath, - } -} - -func applyPipelineTransformers(m *translatePaths, b *bundle.Bundle) error { - pipelineTransformers := []transformFunc{ - transformLibraryNotebook, - transformLibraryFile, - } +func (m *translatePaths) applyPipelineTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { + var fallback = make(map[string]string) + var err error for key, pipeline := range b.Config.Resources.Pipelines { dir, err := pipeline.ConfigFileDirectory() if err != nil { - return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) } - for i := 0; i < len(pipeline.Libraries); i++ { - library := &pipeline.Libraries[i] - err := m.applyTransformers(pipelineTransformers, b, library, dir) + // If we cannot resolve the relative path using the [dyn.Value] location itself, + // use the pipeline's location as fallback. This is necessary for backwards compatibility. 
+ fallback[key] = dir + } + + // Base pattern to match all libraries in all pipelines. + base := dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("pipelines"), + dyn.AnyKey(), + dyn.Key("libraries"), + dyn.AnyIndex(), + ) + + for _, t := range []struct { + pattern dyn.Pattern + fn rewriteFunc + }{ + { + base.Append(dyn.Key("notebook"), dyn.Key("path")), + translateNotebookPath, + }, + { + base.Append(dyn.Key("file"), dyn.Key("path")), + translateFilePath, + }, + } { + v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[2].Key() + dir, err := v.Location().Directory() if err != nil { - return err + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) } + + return m.rewriteRelativeTo(b, p, v, t.fn, dir, fallback[key]) + }) + if err != nil { + return dyn.InvalidValue, err } } - return nil + return v, nil } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 96ff88f3f..7e2f12ab0 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -547,7 +547,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a file for "tasks.spark_python_task.python_file" but got a notebook`) + assert.ErrorContains(t, err, `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`) } func TestJobNotebookTaskWithFileSourceError(t *testing.T) { @@ -581,7 +581,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a notebook for "tasks.notebook_task.notebook_path" but got a file`) + assert.ErrorContains(t, err, `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`) } func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { @@ -615,7 +615,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a notebook for "libraries.notebook.path" but got a file`) + assert.ErrorContains(t, err, `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`) } func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { @@ -649,5 +649,5 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a file for "libraries.file.path" but got a notebook`) + assert.ErrorContains(t, err, `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`) } diff --git a/bundle/tests/bundle/loader.go b/bundle/tests/bundle/loader.go new file mode 100644 index 000000000..52744ca78 --- /dev/null +++ b/bundle/tests/bundle/loader.go @@ -0,0 +1,26 @@ +package bundle + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + 
"github.com/stretchr/testify/require" +) + +func loadTarget(t *testing.T, path, env string) *bundle.Bundle { + ctx := context.Background() + b, err := bundle.Load(ctx, path) + require.NoError(t, err) + err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget(env)...)) + require.NoError(t, err) + err = bundle.Apply(ctx, b, bundle.Seq( + mutator.RewriteSyncPaths(), + mutator.MergeJobClusters(), + mutator.MergeJobTasks(), + mutator.MergePipelineClusters(), + )) + require.NoError(t, err) + return b +} diff --git a/bundle/tests/bundle/pipeline_glob_paths/databricks.yml b/bundle/tests/bundle/pipeline_glob_paths/databricks.yml index 2e69691c1..d25b977ba 100644 --- a/bundle/tests/bundle/pipeline_glob_paths/databricks.yml +++ b/bundle/tests/bundle/pipeline_glob_paths/databricks.yml @@ -8,5 +8,17 @@ resources: libraries: - notebook: path: ./dlt/* - - notebook: - path: ./non-existent + +targets: + default: + default: true + + error: + default: false + + resources: + pipelines: + nyc_taxi_pipeline: + libraries: + - notebook: + path: ./non-existent diff --git a/bundle/tests/bundle/pipeline_glob_paths_test.go b/bundle/tests/bundle/pipeline_glob_paths_test.go index 8f2b62a6b..ed78c9668 100644 --- a/bundle/tests/bundle/pipeline_glob_paths_test.go +++ b/bundle/tests/bundle/pipeline_glob_paths_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/experimental/mocks" @@ -14,13 +13,8 @@ import ( "github.com/stretchr/testify/require" ) -func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { - ctx := context.Background() - b, err := bundle.Load(ctx, "./pipeline_glob_paths") - require.NoError(t, err) - - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget("default")...)) - require.NoError(t, err) +func TestExpandPipelineGlobPaths(t *testing.T) { + b := loadTarget(t, "./pipeline_glob_paths", "default") // Configure mock workspace client m := mocks.NewMockWorkspaceClient(t) @@ -32,13 +26,30 @@ func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { }, nil) b.SetWorkpaceClient(m.WorkspaceClient) - err = bundle.Apply(ctx, b, phases.Initialize()) - require.Error(t, err) - require.ErrorContains(t, err, "notebook ./non-existent not found") - + ctx := context.Background() + err := bundle.Apply(ctx, b, phases.Initialize()) + require.NoError(t, err) require.Equal( t, - b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Libraries[0].Notebook.Path, "/Users/user@domain.com/.bundle/pipeline_glob_paths/default/files/dlt/nyc_taxi_loader", + b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Libraries[0].Notebook.Path, ) } + +func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { + b := loadTarget(t, "./pipeline_glob_paths", "error") + + // Configure mock workspace client + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &config.Config{ + Host: "https://mock.databricks.workspace.com", + } + m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "user@domain.com", + }, nil) + b.SetWorkpaceClient(m.WorkspaceClient) + + ctx := context.Background() + err := bundle.Apply(ctx, b, phases.Initialize()) + require.ErrorContains(t, err, "notebook ./non-existent not found") +} diff --git a/bundle/tests/path_translation/fallback/README.md b/bundle/tests/path_translation/fallback/README.md new file mode 100644 index 000000000..ec5f6c740 
--- /dev/null +++ b/bundle/tests/path_translation/fallback/README.md @@ -0,0 +1,42 @@ +# Test path translation (with fallback to previous behavior) + +As of v0.214.0, all paths in a resource definition were resolved relative to the path +where that resource was first defined. If those paths were specified in the same file, +or in a different file in the same directory, this would be intuitive. + +If those paths were specified in a different file in a different directory, they would +still be resolved relative to the original file. + +For example, a job defined in `./resources/my_job.yml` with an override +in `./override.yml` would have to use paths relative to `./resources`. +This is counter-intuitive and error-prone, and we changed this behavior +in https://github.com/databricks/cli/pull/1273. + +## Appendix + +Q: Why did this behavior apply as of v0.214.0? + +A: With the introduction of dynamic configuration loading, we keep track + of the location (file, line, column) where a resource is defined. + This location information is used to perform path translation, but upon + introduction in v0.214.0, the code still used only a single path per resource. + Due to the semantics of merging two `dyn.Value` objects, the location + information of the first existing value is used for the merged value. + This meant that all paths for a resource were resolved relative to the + location where the resource was first defined. + +Q: What was the behavior before v0.214.0? + +A: Before we relied on dynamic configuration loading, all configuration was + maintained in a typed struct. The path for a resource was an unexported field on the + resource and was set right after loading the configuration file that contains it. + Target overrides contained the same path field, and applying a target override + would set the path for the resource to the path of the target override. + This meant that all paths for a resource were resolved relative to the + location where the resource was last defined. + +Q: Why are we maintaining compatibility with the old behavior? + +A: We want to avoid breaking existing configurations that depend on this behavior. + Use of the old behavior should trigger warnings with a call to action to update. + We can include a deprecation timeline to remove the old behavior in the future. 
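To make the README's example concrete, here is a minimal sketch of the layout it describes (the file names are illustrative and condensed from the fixture added below): the job is first defined under `resources/`, so under the fallback behavior a relative path written in an override file at the bundle root is still resolved against `resources/`, which is why it must step "up" with `../` even though `src/` sits next to the override file.

```yaml
# resources/my_job.yml -- the job is first defined here, so this directory
# becomes the base for path resolution under the fallback behavior.
resources:
  jobs:
    my_job:
      tasks:
        - task_key: notebook_example
          notebook_task:
            notebook_path: "this value is overridden"

# override_job.yml (at the bundle root) -- before the change in #1273, this
# relative path is resolved against resources/, not against this file's
# directory, so it has to reach src/notebook.py via ../.
targets:
  development:
    resources:
      jobs:
        my_job:
          tasks:
            - task_key: notebook_example
              notebook_task:
                notebook_path: ../src/notebook.py
```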
diff --git a/bundle/tests/path_translation/fallback/databricks.yml b/bundle/tests/path_translation/fallback/databricks.yml new file mode 100644 index 000000000..92be3f921 --- /dev/null +++ b/bundle/tests/path_translation/fallback/databricks.yml @@ -0,0 +1,13 @@ +bundle: + name: path_translation_fallback + +include: + - "resources/*.yml" + - "override_*.yml" + +targets: + development: + default: true + + error: + default: false diff --git a/bundle/tests/path_translation/fallback/override_job.yml b/bundle/tests/path_translation/fallback/override_job.yml new file mode 100644 index 000000000..c4354b14b --- /dev/null +++ b/bundle/tests/path_translation/fallback/override_job.yml @@ -0,0 +1,41 @@ +targets: + development: + resources: + jobs: + my_job: + tasks: + - task_key: notebook_example + notebook_task: + notebook_path: ../src/notebook.py + + - task_key: spark_python_example + spark_python_task: + python_file: ../src/file.py + + - task_key: dbt_example + dbt_task: + project_directory: ../src/dbt_project + commands: + - "dbt run" + + - task_key: sql_example + sql_task: + file: + path: ../src/sql.sql + warehouse_id: cafef00d + + - task_key: python_wheel_example + python_wheel_task: + package_name: my_package + + # Append library; the path is resolved relative to the job's directory. + libraries: + - whl: ../dist/wheel2.whl + + - task_key: spark_jar_example + spark_jar_task: + main_class_name: com.example.Main + + # Append library; the path is resolved relative to the job's directory. + libraries: + - jar: ../target/jar2.jar diff --git a/bundle/tests/path_translation/fallback/override_pipeline.yml b/bundle/tests/path_translation/fallback/override_pipeline.yml new file mode 100644 index 000000000..e5790256a --- /dev/null +++ b/bundle/tests/path_translation/fallback/override_pipeline.yml @@ -0,0 +1,13 @@ +targets: + development: + resources: + pipelines: + my_pipeline: + + # Append library; the path is resolved relative to the pipeline's directory. 
+ libraries: + - file: + path: ../src/file2.py + + - notebook: + path: ../src/notebook2.py diff --git a/bundle/tests/path_translation/fallback/resources/my_job.yml b/bundle/tests/path_translation/fallback/resources/my_job.yml new file mode 100644 index 000000000..4907df4f0 --- /dev/null +++ b/bundle/tests/path_translation/fallback/resources/my_job.yml @@ -0,0 +1,36 @@ +resources: + jobs: + my_job: + name: "placeholder" + tasks: + - task_key: notebook_example + notebook_task: + notebook_path: "this value is overridden" + + - task_key: spark_python_example + spark_python_task: + python_file: "this value is overridden" + + - task_key: dbt_example + dbt_task: + project_directory: "this value is overridden" + commands: + - "dbt run" + + - task_key: sql_example + sql_task: + file: + path: "this value is overridden" + warehouse_id: cafef00d + + - task_key: python_wheel_example + python_wheel_task: + package_name: my_package + libraries: + - whl: ../dist/wheel1.whl + + - task_key: spark_jar_example + spark_jar_task: + main_class_name: com.example.Main + libraries: + - jar: ../target/jar1.jar diff --git a/bundle/tests/path_translation/fallback/resources/my_pipeline.yml b/bundle/tests/path_translation/fallback/resources/my_pipeline.yml new file mode 100644 index 000000000..457856d1d --- /dev/null +++ b/bundle/tests/path_translation/fallback/resources/my_pipeline.yml @@ -0,0 +1,9 @@ +resources: + pipelines: + my_pipeline: + name: "placeholder" + libraries: + - file: + path: ../src/file1.py + - notebook: + path: ../src/notebook1.py diff --git a/bundle/tests/path_translation/fallback/src/dbt_project/.gitkeep b/bundle/tests/path_translation/fallback/src/dbt_project/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/tests/path_translation/fallback/src/file.py b/bundle/tests/path_translation/fallback/src/file.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/file.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/file1.py b/bundle/tests/path_translation/fallback/src/file1.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/file1.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/file2.py b/bundle/tests/path_translation/fallback/src/file2.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/file2.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/notebook.py b/bundle/tests/path_translation/fallback/src/notebook.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/notebook.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/notebook1.py b/bundle/tests/path_translation/fallback/src/notebook1.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/notebook1.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/notebook2.py b/bundle/tests/path_translation/fallback/src/notebook2.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/notebook2.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git 
a/bundle/tests/path_translation/fallback/src/sql.sql b/bundle/tests/path_translation/fallback/src/sql.sql new file mode 100644 index 000000000..24c55832d --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/sql.sql @@ -0,0 +1 @@ +select "Hello, World!" diff --git a/bundle/tests/path_translation/nominal/README.md b/bundle/tests/path_translation/nominal/README.md new file mode 100644 index 000000000..aa7a52ab2 --- /dev/null +++ b/bundle/tests/path_translation/nominal/README.md @@ -0,0 +1,6 @@ +# Test path translation (nominal behavior) + +As of v0.216.0 (PR at https://github.com/databricks/cli/pull/1273), all paths in a resource +definition are resolved relative to the directory of the file where they are defined. + +This is more intuitive than the previous behavior (see `../fallback/README.md` for details). diff --git a/bundle/tests/path_translation/nominal/databricks.yml b/bundle/tests/path_translation/nominal/databricks.yml new file mode 100644 index 000000000..cd425920d --- /dev/null +++ b/bundle/tests/path_translation/nominal/databricks.yml @@ -0,0 +1,13 @@ +bundle: + name: path_translation_nominal + +include: + - "resources/*.yml" + - "override_*.yml" + +targets: + development: + default: true + + error: + default: false diff --git a/bundle/tests/path_translation/nominal/override_job.yml b/bundle/tests/path_translation/nominal/override_job.yml new file mode 100644 index 000000000..9ce90e63e --- /dev/null +++ b/bundle/tests/path_translation/nominal/override_job.yml @@ -0,0 +1,53 @@ +targets: + development: + resources: + jobs: + my_job: + tasks: + - task_key: notebook_example + notebook_task: + notebook_path: ./src/notebook.py + + - task_key: spark_python_example + spark_python_task: + python_file: ./src/file.py + + - task_key: dbt_example + dbt_task: + project_directory: ./src/dbt_project + commands: + - "dbt run" + + - task_key: sql_example + sql_task: + file: + path: ./src/sql.sql + warehouse_id: cafef00d + + - task_key: python_wheel_example + python_wheel_task: + package_name: my_package + + # Append library; the path is resolved relative to this file's directory. + libraries: + - whl: ./dist/wheel2.whl + + - task_key: spark_jar_example + spark_jar_task: + main_class_name: com.example.Main + + # Append library; the path is resolved relative to this file's directory. + libraries: + - jar: ./target/jar2.jar + + - task_key: for_each_notebook_example + for_each_task: + task: + notebook_task: + notebook_path: ./src/notebook.py + + - task_key: for_each_spark_python_example + for_each_task: + task: + spark_python_task: + python_file: ./src/file.py diff --git a/bundle/tests/path_translation/nominal/override_pipeline.yml b/bundle/tests/path_translation/nominal/override_pipeline.yml new file mode 100644 index 000000000..ac1fff410 --- /dev/null +++ b/bundle/tests/path_translation/nominal/override_pipeline.yml @@ -0,0 +1,13 @@ +targets: + development: + resources: + pipelines: + my_pipeline: + + # Append library; the path is resolved relative to this file's directory. 
+ libraries: + - file: + path: src/file2.py + + - notebook: + path: src/notebook2.py diff --git a/bundle/tests/path_translation/nominal/resources/my_job.yml b/bundle/tests/path_translation/nominal/resources/my_job.yml new file mode 100644 index 000000000..2020c9dc8 --- /dev/null +++ b/bundle/tests/path_translation/nominal/resources/my_job.yml @@ -0,0 +1,48 @@ +resources: + jobs: + my_job: + name: "placeholder" + tasks: + - task_key: notebook_example + notebook_task: + notebook_path: "this value is overridden" + + - task_key: spark_python_example + spark_python_task: + python_file: "this value is overridden" + + - task_key: dbt_example + dbt_task: + project_directory: "this value is overridden" + commands: + - "dbt run" + + - task_key: sql_example + sql_task: + file: + path: "this value is overridden" + warehouse_id: cafef00d + + - task_key: python_wheel_example + python_wheel_task: + package_name: my_package + libraries: + - whl: ../dist/wheel1.whl + + - task_key: spark_jar_example + spark_jar_task: + main_class_name: com.example.Main + libraries: + - jar: ../target/jar1.jar + + - task_key: for_each_notebook_example + for_each_task: + task: + notebook_task: + notebook_path: "this value is overridden" + + - task_key: for_each_spark_python_example + for_each_task: + task: + spark_python_task: + python_file: "this value is overridden" diff --git a/bundle/tests/path_translation/nominal/resources/my_pipeline.yml b/bundle/tests/path_translation/nominal/resources/my_pipeline.yml new file mode 100644 index 000000000..457856d1d --- /dev/null +++ b/bundle/tests/path_translation/nominal/resources/my_pipeline.yml @@ -0,0 +1,9 @@ +resources: + pipelines: + my_pipeline: + name: "placeholder" + libraries: + - file: + path: ../src/file1.py + - notebook: + path: ../src/notebook1.py diff --git a/bundle/tests/path_translation/nominal/src/dbt_project/.gitkeep b/bundle/tests/path_translation/nominal/src/dbt_project/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/tests/path_translation/nominal/src/file.py b/bundle/tests/path_translation/nominal/src/file.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/file.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/file1.py b/bundle/tests/path_translation/nominal/src/file1.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/file1.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/file2.py b/bundle/tests/path_translation/nominal/src/file2.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/file2.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/notebook.py b/bundle/tests/path_translation/nominal/src/notebook.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/notebook.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/notebook1.py b/bundle/tests/path_translation/nominal/src/notebook1.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/notebook1.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/notebook2.py 
b/bundle/tests/path_translation/nominal/src/notebook2.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/notebook2.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/sql.sql b/bundle/tests/path_translation/nominal/src/sql.sql new file mode 100644 index 000000000..24c55832d --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/sql.sql @@ -0,0 +1 @@ +select "Hello, World!" diff --git a/bundle/tests/path_translation_test.go b/bundle/tests/path_translation_test.go new file mode 100644 index 000000000..6c3393450 --- /dev/null +++ b/bundle/tests/path_translation_test.go @@ -0,0 +1,112 @@ +package config_tests + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPathTranslationFallback(t *testing.T) { + b := loadTarget(t, "./path_translation/fallback", "development") + + m := mutator.TranslatePaths() + err := bundle.Apply(context.Background(), b, m) + require.NoError(t, err) + + j := b.Config.Resources.Jobs["my_job"] + assert.Len(t, j.Tasks, 6) + + assert.Equal(t, "notebook_example", filepath.ToSlash(j.Tasks[0].TaskKey)) + assert.Equal(t, "src/notebook", filepath.ToSlash(j.Tasks[0].NotebookTask.NotebookPath)) + + assert.Equal(t, "spark_python_example", filepath.ToSlash(j.Tasks[1].TaskKey)) + assert.Equal(t, "src/file.py", filepath.ToSlash(j.Tasks[1].SparkPythonTask.PythonFile)) + + assert.Equal(t, "dbt_example", filepath.ToSlash(j.Tasks[2].TaskKey)) + assert.Equal(t, "src/dbt_project", filepath.ToSlash(j.Tasks[2].DbtTask.ProjectDirectory)) + + assert.Equal(t, "sql_example", filepath.ToSlash(j.Tasks[3].TaskKey)) + assert.Equal(t, "src/sql.sql", filepath.ToSlash(j.Tasks[3].SqlTask.File.Path)) + + assert.Equal(t, "python_wheel_example", filepath.ToSlash(j.Tasks[4].TaskKey)) + assert.Equal(t, "dist/wheel1.whl", filepath.ToSlash(j.Tasks[4].Libraries[0].Whl)) + assert.Equal(t, "dist/wheel2.whl", filepath.ToSlash(j.Tasks[4].Libraries[1].Whl)) + + assert.Equal(t, "spark_jar_example", filepath.ToSlash(j.Tasks[5].TaskKey)) + assert.Equal(t, "target/jar1.jar", filepath.ToSlash(j.Tasks[5].Libraries[0].Jar)) + assert.Equal(t, "target/jar2.jar", filepath.ToSlash(j.Tasks[5].Libraries[1].Jar)) + + p := b.Config.Resources.Pipelines["my_pipeline"] + assert.Len(t, p.Libraries, 4) + + assert.Equal(t, "src/file1.py", filepath.ToSlash(p.Libraries[0].File.Path)) + assert.Equal(t, "src/notebook1", filepath.ToSlash(p.Libraries[1].Notebook.Path)) + assert.Equal(t, "src/file2.py", filepath.ToSlash(p.Libraries[2].File.Path)) + assert.Equal(t, "src/notebook2", filepath.ToSlash(p.Libraries[3].Notebook.Path)) +} + +func TestPathTranslationFallbackError(t *testing.T) { + b := loadTarget(t, "./path_translation/fallback", "error") + + m := mutator.TranslatePaths() + err := bundle.Apply(context.Background(), b, m) + assert.ErrorContains(t, err, `notebook this value is overridden not found`) +} + +func TestPathTranslationNominal(t *testing.T) { + b := loadTarget(t, "./path_translation/nominal", "development") + + m := mutator.TranslatePaths() + err := bundle.Apply(context.Background(), b, m) + assert.NoError(t, err) + + j := b.Config.Resources.Jobs["my_job"] + assert.Len(t, j.Tasks, 8) + + assert.Equal(t, "notebook_example", filepath.ToSlash(j.Tasks[0].TaskKey)) + assert.Equal(t, "src/notebook", 
filepath.ToSlash(j.Tasks[0].NotebookTask.NotebookPath)) + + assert.Equal(t, "spark_python_example", filepath.ToSlash(j.Tasks[1].TaskKey)) + assert.Equal(t, "src/file.py", filepath.ToSlash(j.Tasks[1].SparkPythonTask.PythonFile)) + + assert.Equal(t, "dbt_example", filepath.ToSlash(j.Tasks[2].TaskKey)) + assert.Equal(t, "src/dbt_project", filepath.ToSlash(j.Tasks[2].DbtTask.ProjectDirectory)) + + assert.Equal(t, "sql_example", filepath.ToSlash(j.Tasks[3].TaskKey)) + assert.Equal(t, "src/sql.sql", filepath.ToSlash(j.Tasks[3].SqlTask.File.Path)) + + assert.Equal(t, "python_wheel_example", filepath.ToSlash(j.Tasks[4].TaskKey)) + assert.Equal(t, "dist/wheel1.whl", filepath.ToSlash(j.Tasks[4].Libraries[0].Whl)) + assert.Equal(t, "dist/wheel2.whl", filepath.ToSlash(j.Tasks[4].Libraries[1].Whl)) + + assert.Equal(t, "spark_jar_example", filepath.ToSlash(j.Tasks[5].TaskKey)) + assert.Equal(t, "target/jar1.jar", filepath.ToSlash(j.Tasks[5].Libraries[0].Jar)) + assert.Equal(t, "target/jar2.jar", filepath.ToSlash(j.Tasks[5].Libraries[1].Jar)) + + assert.Equal(t, "for_each_notebook_example", filepath.ToSlash(j.Tasks[6].TaskKey)) + assert.Equal(t, "src/notebook", filepath.ToSlash(j.Tasks[6].ForEachTask.Task.NotebookTask.NotebookPath)) + + assert.Equal(t, "for_each_spark_python_example", filepath.ToSlash(j.Tasks[7].TaskKey)) + assert.Equal(t, "src/file.py", filepath.ToSlash(j.Tasks[7].ForEachTask.Task.SparkPythonTask.PythonFile)) + + p := b.Config.Resources.Pipelines["my_pipeline"] + assert.Len(t, p.Libraries, 4) + + assert.Equal(t, "src/file1.py", filepath.ToSlash(p.Libraries[0].File.Path)) + assert.Equal(t, "src/notebook1", filepath.ToSlash(p.Libraries[1].Notebook.Path)) + assert.Equal(t, "src/file2.py", filepath.ToSlash(p.Libraries[2].File.Path)) + assert.Equal(t, "src/notebook2", filepath.ToSlash(p.Libraries[3].Notebook.Path)) +} + +func TestPathTranslationNominalError(t *testing.T) { + b := loadTarget(t, "./path_translation/nominal", "error") + + m := mutator.TranslatePaths() + err := bundle.Apply(context.Background(), b, m) + assert.ErrorContains(t, err, `notebook this value is overridden not found`) +} diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go index 7e8b5d6e9..665ba0c54 100644 --- a/libs/dyn/pattern.go +++ b/libs/dyn/pattern.go @@ -33,6 +33,14 @@ func NewPatternFromPath(p Path) Pattern { return cs } +// Append appends the given components to the pattern. +func (p Pattern) Append(cs ...patternComponent) Pattern { + out := make(Pattern, len(p)+len(cs)) + copy(out, p) + copy(out[len(p):], cs) + return out +} + type anyKeyComponent struct{} // AnyKey returns a pattern component that matches any key. diff --git a/libs/dyn/pattern_test.go b/libs/dyn/pattern_test.go index b91af8293..b968f113d 100644 --- a/libs/dyn/pattern_test.go +++ b/libs/dyn/pattern_test.go @@ -26,3 +26,18 @@ func TestNewPatternFromPath(t *testing.T) { pat2 := dyn.NewPatternFromPath(path) assert.Equal(t, pat1, pat2) } + +func TestPatternAppend(t *testing.T) { + p1 := dyn.NewPattern(dyn.Key("foo"), dyn.Index(1)) + p2 := dyn.NewPattern(dyn.Key("foo")).Append(dyn.Index(1)) + assert.Equal(t, p1, p2) +} + +func TestPatternAppendAlwaysNew(t *testing.T) { + p := make(dyn.Pattern, 0, 2).Append(dyn.Key("foo")) + + // There is room for a second element in the slice. 
+ p1 := p.Append(dyn.Index(1)) + p2 := p.Append(dyn.Index(2)) + assert.NotEqual(t, p1, p2) +} From de89af6f8c27fd8e85702bf5e2c85c70813721b0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 19 Mar 2024 10:47:41 +0100 Subject: [PATCH 087/286] Push deployment state right after files upload (#1293) ## Changes Push deployment state right after files upload ## Tests Integration tests succeed --- bundle/phases/deploy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 52515a7ea..de94c5a0e 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -32,6 +32,7 @@ func Deploy() bundle.Mutator { python.TransformWheelTask(), files.Upload(), deploy.StateUpdate(), + deploy.StatePush(), permissions.ApplyWorkspaceRootPermissions(), terraform.Interpolate(), terraform.Write(), @@ -40,7 +41,6 @@ func Deploy() bundle.Mutator { terraform.Apply(), bundle.Seq( terraform.StatePush(), - deploy.StatePush(), terraform.Load(), metadata.Compute(), metadata.Upload(), From 8255c9d9fbb7dc320672869befcd64197c5929e8 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 19 Mar 2024 10:49:26 +0100 Subject: [PATCH 088/286] Make `Append` function to `dyn.Path` return independent slice (#1295) ## Changes While working on #1273, I found that calls to `Append` on a `dyn.Pattern` were mutating the original slice. This is expected because appending to a slice will mutate in place if the capacity of the original slice is large enough. This change updates the `Append` call on the `dyn.Path` as well to return a newly allocated slice to avoid inadvertently mutating the originals. We have existing call sites in the `dyn` package that mutate a `dyn.Path` (e.g. walk or visit) and these are modified to continue to do this with a direct call to `append`. Callbacks that use the `dyn.Path` argument outside of the callback need to make a copy to ensure it isn't mutated (this is no different from existing semantics). The `Join` function wasn't used and is removed as part of this change. ## Tests Unit tests. --- libs/dyn/path.go | 14 +++++--------- libs/dyn/path_test.go | 16 +++++++--------- libs/dyn/pattern.go | 4 ++-- libs/dyn/pattern_test.go | 15 +++++++++++---- libs/dyn/visit.go | 2 +- libs/dyn/visit_map.go | 4 ++-- libs/dyn/visit_set.go | 2 +- libs/dyn/walk.go | 4 ++-- 8 files changed, 31 insertions(+), 30 deletions(-) diff --git a/libs/dyn/path.go b/libs/dyn/path.go index 91893f921..76377e2dc 100644 --- a/libs/dyn/path.go +++ b/libs/dyn/path.go @@ -49,17 +49,13 @@ func NewPath(cs ...pathComponent) Path { return cs } -// Join joins the given paths. -func (p Path) Join(qs ...Path) Path { - for _, q := range qs { - p = p.Append(q...) - } - return p -} - // Append appends the given components to the path. +// Mutations to the returned path do not affect the original path. func (p Path) Append(cs ...pathComponent) Path { - return append(p, cs...) + out := make(Path, len(p)+len(cs)) + copy(out, p) + copy(out[len(p):], cs) + return out } // Equal returns true if the paths are equal. 
diff --git a/libs/dyn/path_test.go b/libs/dyn/path_test.go index c4ea26c4a..1152a060a 100644 --- a/libs/dyn/path_test.go +++ b/libs/dyn/path_test.go @@ -19,16 +19,14 @@ func TestPathAppend(t *testing.T) { assert.True(t, p2.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar"), dyn.Index(1)))) } -func TestPathJoin(t *testing.T) { - p := dyn.NewPath(dyn.Key("foo")) +func TestPathAppendAlwaysNew(t *testing.T) { + p := make(dyn.Path, 0, 2) + p = append(p, dyn.Key("foo")) - // Single arg. - p1 := p.Join(dyn.NewPath(dyn.Key("bar"))) - assert.True(t, p1.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar")))) - - // Multiple args. - p2 := p.Join(dyn.NewPath(dyn.Key("bar")), dyn.NewPath(dyn.Index(1))) - assert.True(t, p2.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar"), dyn.Index(1)))) + // There is room for a second element in the slice. + p1 := p.Append(dyn.Index(1)) + p2 := p.Append(dyn.Index(2)) + assert.NotEqual(t, p1, p2) } func TestPathEqualEmpty(t *testing.T) { diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go index 665ba0c54..960a50d5b 100644 --- a/libs/dyn/pattern.go +++ b/libs/dyn/pattern.go @@ -58,7 +58,7 @@ func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitO m = maps.Clone(m) for key, value := range m { var err error - nv, err := visit(value, prefix.Append(Key(key)), suffix, opts) + nv, err := visit(value, append(prefix, Key(key)), suffix, opts) if err != nil { // Leave the value intact if the suffix pattern didn't match any value. if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { @@ -89,7 +89,7 @@ func (c anyIndexComponent) visit(v Value, prefix Path, suffix Pattern, opts visi s = slices.Clone(s) for i, value := range s { var err error - nv, err := visit(value, prefix.Append(Index(i)), suffix, opts) + nv, err := visit(value, append(prefix, Index(i)), suffix, opts) if err != nil { // Leave the value intact if the suffix pattern didn't match any value. if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { diff --git a/libs/dyn/pattern_test.go b/libs/dyn/pattern_test.go index b968f113d..372fe7467 100644 --- a/libs/dyn/pattern_test.go +++ b/libs/dyn/pattern_test.go @@ -28,13 +28,20 @@ func TestNewPatternFromPath(t *testing.T) { } func TestPatternAppend(t *testing.T) { - p1 := dyn.NewPattern(dyn.Key("foo"), dyn.Index(1)) - p2 := dyn.NewPattern(dyn.Key("foo")).Append(dyn.Index(1)) - assert.Equal(t, p1, p2) + p := dyn.NewPattern(dyn.Key("foo")) + + // Single arg. + p1 := p.Append(dyn.Key("bar")) + assert.Equal(t, dyn.NewPattern(dyn.Key("foo"), dyn.Key("bar")), p1) + + // Multiple args. + p2 := p.Append(dyn.Key("bar"), dyn.Index(1)) + assert.Equal(t, dyn.NewPattern(dyn.Key("foo"), dyn.Key("bar"), dyn.Index(1)), p2) } func TestPatternAppendAlwaysNew(t *testing.T) { - p := make(dyn.Pattern, 0, 2).Append(dyn.Key("foo")) + p := make(dyn.Pattern, 0, 2) + p = append(p, dyn.Key("foo")) // There is room for a second element in the slice. 
p1 := p.Append(dyn.Index(1)) diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index ffd8323d4..376dcc22d 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -66,7 +66,7 @@ func visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, erro } func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { - path := prefix.Append(component) + path := append(prefix, component) switch { case component.isKey(): diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index 05d17c737..18fc668ed 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -18,7 +18,7 @@ func Foreach(fn MapFunc) MapFunc { m := maps.Clone(v.MustMap()) for key, value := range m { var err error - m[key], err = fn(p.Append(Key(key)), value) + m[key], err = fn(append(p, Key(key)), value) if err != nil { return InvalidValue, err } @@ -28,7 +28,7 @@ func Foreach(fn MapFunc) MapFunc { s := slices.Clone(v.MustSequence()) for i, value := range s { var err error - s[i], err = fn(p.Append(Index(i)), value) + s[i], err = fn(append(p, Index(i)), value) if err != nil { return InvalidValue, err } diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index b22c3da4a..edcd9bb73 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -30,7 +30,7 @@ func SetByPath(v Value, p Path, nv Value) (Value, error) { return visit(v, EmptyPath, NewPatternFromPath(p), visitOptions{ fn: func(prefix Path, v Value) (Value, error) { - path := prefix.Append(component) + path := append(prefix, component) switch { case component.isKey(): diff --git a/libs/dyn/walk.go b/libs/dyn/walk.go index 138816be6..26ddfc11d 100644 --- a/libs/dyn/walk.go +++ b/libs/dyn/walk.go @@ -36,7 +36,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro m := v.MustMap() out := make(map[string]Value, len(m)) for k := range m { - nv, err := walk(m[k], p.Append(Key(k)), fn) + nv, err := walk(m[k], append(p, Key(k)), fn) if err == ErrDrop { continue } @@ -50,7 +50,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro s := v.MustSequence() out := make([]Value, 0, len(s)) for i := range s { - nv, err := walk(s[i], p.Append(Index(i)), fn) + nv, err := walk(s[i], append(p, Index(i)), fn) if err == ErrDrop { continue } From 0ef93c2502c537aeff4159b71d3265a11f04cd54 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 20 Mar 2024 14:57:53 +0100 Subject: [PATCH 089/286] Update Go SDK to v0.35.0 (#1300) ## Changes SDK release: https://github.com/databricks/databricks-sdk-go/releases/tag/v0.35.0 ## Tests Tests pass. 
--- .codegen/_openapi_sha | 2 +- bundle/schema/docs/bundle_descriptions.json | 36 +- .../network-connectivity.go | 11 +- cmd/account/settings/settings.go | 3 - cmd/workspace/catalogs/catalogs.go | 19 +- cmd/workspace/catalogs/overrides.go | 3 +- .../external-locations/external-locations.go | 9 +- cmd/workspace/functions/functions.go | 8 +- .../lakehouse-monitors/lakehouse-monitors.go | 8 +- cmd/workspace/lakeview/lakeview.go | 336 +++++++++++++++++- .../model-versions/model-versions.go | 3 + cmd/workspace/online-tables/online-tables.go | 4 +- .../registered-models/registered-models.go | 3 + cmd/workspace/schemas/schemas.go | 9 +- cmd/workspace/secrets/secrets.go | 3 +- .../serving-endpoints/serving-endpoints.go | 34 +- cmd/workspace/settings/settings.go | 3 - .../storage-credentials.go | 11 +- cmd/workspace/tables/tables.go | 2 + .../vector-search-indexes.go | 1 + cmd/workspace/volumes/volumes.go | 3 + go.mod | 18 +- go.sum | 44 +-- 23 files changed, 464 insertions(+), 109 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index fb91589e9..499e0da40 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -d855b30f25a06fe84f25214efa20e7f1fffcdf9e \ No newline at end of file +3821dc51952c5cf1c276dd84967da011b191e64a \ No newline at end of file diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index d107af73b..53b9be532 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -193,7 +193,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." } } }, @@ -725,7 +725,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -785,7 +785,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). 
When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -1269,7 +1269,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -1371,7 +1371,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -1449,7 +1449,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the notebook. 
When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -1551,7 +1551,7 @@ } }, "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." }, "table": { "description": "Table trigger settings.", @@ -1653,7 +1653,7 @@ } }, "served_entities": { - "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities.", + "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities.", "items": { "description": "", "properties": { @@ -1791,7 +1791,7 @@ } }, "served_models": { - "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.", + "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models.", "items": { "description": "", "properties": { @@ -2726,7 +2726,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." } } }, @@ -3258,7 +3258,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -3318,7 +3318,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3802,7 +3802,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -3904,7 +3904,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). 
When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -3982,7 +3982,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" } } }, @@ -4084,7 +4084,7 @@ } }, "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether this schedule is paused or not." }, "table": { "description": "Table trigger settings.", @@ -4186,7 +4186,7 @@ } }, "served_entities": { - "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities.", + "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities.", "items": { "description": "", "properties": { @@ -4324,7 +4324,7 @@ } }, "served_models": { - "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.", + "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models.", "items": { "description": "", "properties": { diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index 2b6cf54a1..cd8da2905 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -300,11 +300,12 @@ func newDeletePrivateEndpointRule() *cobra.Command { cmd.Short = `Delete a private endpoint rule.` cmd.Long = `Delete a private endpoint rule. - Initiates deleting a private endpoint rule. The private endpoint will be - deactivated and will be purged after seven days of deactivation. When a - private endpoint is in deactivated state, deactivated field is set to true - and the private endpoint is not available to your serverless compute - resources. + Initiates deleting a private endpoint rule. If the connection state is PENDING + or EXPIRED, the private endpoint is immediately deleted. Otherwise, the + private endpoint is deactivated and will be deleted after seven days of + deactivation. 
When a private endpoint is deactivated, the deactivated field + is set to true and the private endpoint is not available to your serverless + compute resources. Arguments: NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectvity Configuration ID. diff --git a/cmd/account/settings/settings.go b/cmd/account/settings/settings.go index 29bb6ad15..a750e81e0 100755 --- a/cmd/account/settings/settings.go +++ b/cmd/account/settings/settings.go @@ -23,9 +23,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add subservices diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 0d0989b97..8085b69e2 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -210,6 +210,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include catalogs in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get NAME" cmd.Short = `Get a catalog.` cmd.Long = `Get a catalog. @@ -260,11 +262,18 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *catalog.ListCatalogsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq catalog.ListCatalogsRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include catalogs in the response for which the principal can only access selective metadata for.`) + cmd.Use = "list" cmd.Short = `List catalogs.` cmd.Long = `List catalogs. @@ -277,11 +286,17 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response := w.Catalogs.List(ctx) + + response := w.Catalogs.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -291,7 +306,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. 
for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd diff --git a/cmd/workspace/catalogs/overrides.go b/cmd/workspace/catalogs/overrides.go index 9ab1bf052..e2201dc15 100644 --- a/cmd/workspace/catalogs/overrides.go +++ b/cmd/workspace/catalogs/overrides.go @@ -2,10 +2,11 @@ package catalogs import ( "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/spf13/cobra" ) -func listOverride(listCmd *cobra.Command) { +func listOverride(listCmd *cobra.Command, listReq *catalog.ListCatalogsRequest) { listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` {{header "Name"}} {{header "Type"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index a123507ca..bd63d3fa4 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -222,6 +222,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include external locations in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get NAME" cmd.Short = `Get an external location.` cmd.Long = `Get an external location. @@ -282,6 +284,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include external locations in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of external locations to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -291,10 +294,8 @@ func newList() *cobra.Command { Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. The caller must be a metastore admin, the owner of the external - location, or a user that has some privilege on the external location. For - unpaginated request, there is no guarantee of a specific ordering of the - elements in the array. For paginated request, elements are ordered by their - name.` + location, or a user that has some privilege on the external location. There is + no guarantee of a specific ordering of the elements in the array.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index e4de29b5a..1aa6daf38 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -204,6 +204,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include functions in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get NAME" cmd.Short = `Get a function.` cmd.Long = `Get a function. 
@@ -281,6 +283,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include functions in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of functions to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -293,9 +296,8 @@ func newList() *cobra.Command { the user must have the **USE_CATALOG** privilege on the catalog and the **USE_SCHEMA** privilege on the schema, and the output list contains only functions for which either the user has the **EXECUTE** privilege or the user - is the owner. For unpaginated request, there is no guarantee of a specific - ordering of the elements in the array. For paginated request, elements are - ordered by their name. + is the owner. There is no guarantee of a specific ordering of the elements in + the array. Arguments: CATALOG_NAME: Name of parent catalog for functions of interest. diff --git a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go index 95a536a05..7e1fe20be 100755 --- a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go +++ b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go @@ -146,11 +146,11 @@ func newCreate() *cobra.Command { // TODO: array: custom_metrics // TODO: complex arg: data_classification_config // TODO: complex arg: inference_log - // TODO: array: notifications + // TODO: complex arg: notifications // TODO: complex arg: schedule cmd.Flags().BoolVar(&createReq.SkipBuiltinDashboard, "skip-builtin-dashboard", createReq.SkipBuiltinDashboard, `Whether to skip creating a default dashboard summarizing data quality metrics.`) // TODO: array: slicing_exprs - // TODO: output-only field + // TODO: complex arg: snapshot // TODO: complex arg: time_series cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `Optional argument to specify the warehouse for dashboard creation.`) @@ -593,10 +593,10 @@ func newUpdate() *cobra.Command { // TODO: array: custom_metrics // TODO: complex arg: data_classification_config // TODO: complex arg: inference_log - // TODO: array: notifications + // TODO: complex arg: notifications // TODO: complex arg: schedule // TODO: array: slicing_exprs - // TODO: output-only field + // TODO: complex arg: snapshot // TODO: complex arg: time_series cmd.Use = "update FULL_NAME OUTPUT_SCHEMA_NAME" diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index df42e7192..4fc7404a6 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -3,7 +3,10 @@ package lakeview import ( + "fmt" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/spf13/cobra" @@ -27,7 +30,12 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPublished()) cmd.AddCommand(newPublish()) + cmd.AddCommand(newTrash()) + cmd.AddCommand(newUpdate()) // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { @@ -37,6 +45,201 @@ func New() *cobra.Command { return cmd } +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *dashboards.CreateDashboardRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq dashboards.CreateDashboardRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.ParentPath, "parent-path", createReq.ParentPath, `The workspace path of the folder containing the dashboard.`) + cmd.Flags().StringVar(&createReq.SerializedDashboard, "serialized-dashboard", createReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`) + cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `The warehouse ID used to run the dashboard.`) + + cmd.Use = "create DISPLAY_NAME" + cmd.Short = `Create dashboard.` + cmd.Long = `Create dashboard. + + Create a draft dashboard. + + Arguments: + DISPLAY_NAME: The display name of the dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'display_name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + createReq.DisplayName = args[0] + } + + response, err := w.Lakeview.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *dashboards.GetLakeviewRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq dashboards.GetLakeviewRequest + + // TODO: short flags + + cmd.Use = "get DASHBOARD_ID" + cmd.Short = `Get dashboard.` + cmd.Long = `Get dashboard. + + Get a draft dashboard. 
+ + Arguments: + DASHBOARD_ID: UUID identifying the dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.DashboardId = args[0] + + response, err := w.Lakeview.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start get-published command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPublishedOverrides []func( + *cobra.Command, + *dashboards.GetPublishedRequest, +) + +func newGetPublished() *cobra.Command { + cmd := &cobra.Command{} + + var getPublishedReq dashboards.GetPublishedRequest + + // TODO: short flags + + cmd.Use = "get-published DASHBOARD_ID" + cmd.Short = `Get published dashboard.` + cmd.Long = `Get published dashboard. + + Get the current published dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to be published.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPublishedReq.DashboardId = args[0] + + response, err := w.Lakeview.GetPublished(ctx, getPublishedReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPublishedOverrides { + fn(cmd, &getPublishedReq) + } + + return cmd +} + // start publish command // Slice with functions to override default command behavior. @@ -87,11 +290,11 @@ func newPublish() *cobra.Command { } publishReq.DashboardId = args[0] - err = w.Lakeview.Publish(ctx, publishReq) + response, err := w.Lakeview.Publish(ctx, publishReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -106,4 +309,133 @@ func newPublish() *cobra.Command { return cmd } +// start trash command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var trashOverrides []func( + *cobra.Command, + *dashboards.TrashRequest, +) + +func newTrash() *cobra.Command { + cmd := &cobra.Command{} + + var trashReq dashboards.TrashRequest + + // TODO: short flags + + cmd.Use = "trash DASHBOARD_ID" + cmd.Short = `Trash dashboard.` + cmd.Long = `Trash dashboard. + + Trash a dashboard. 
+ + Arguments: + DASHBOARD_ID: UUID identifying the dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + trashReq.DashboardId = args[0] + + err = w.Lakeview.Trash(ctx, trashReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range trashOverrides { + fn(cmd, &trashReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *dashboards.UpdateDashboardRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq dashboards.UpdateDashboardRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `The display name of the dashboard.`) + cmd.Flags().StringVar(&updateReq.Etag, "etag", updateReq.Etag, `The etag for the dashboard.`) + cmd.Flags().StringVar(&updateReq.SerializedDashboard, "serialized-dashboard", updateReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`) + cmd.Flags().StringVar(&updateReq.WarehouseId, "warehouse-id", updateReq.WarehouseId, `The warehouse ID used to run the dashboard.`) + + cmd.Use = "update DASHBOARD_ID" + cmd.Short = `Update dashboard.` + cmd.Long = `Update dashboard. + + Update a draft dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.DashboardId = args[0] + + response, err := w.Lakeview.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + // end service Lakeview diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index a606b01df..7b556c724 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -133,6 +133,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include model versions in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get FULL_NAME VERSION" cmd.Short = `Get a Model Version.` cmd.Long = `Get a Model Version. @@ -266,6 +268,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include model versions in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of model versions to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) diff --git a/cmd/workspace/online-tables/online-tables.go b/cmd/workspace/online-tables/online-tables.go index a1e21e0f1..da2f8c041 100755 --- a/cmd/workspace/online-tables/online-tables.go +++ b/cmd/workspace/online-tables/online-tables.go @@ -45,13 +45,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createOverrides []func( *cobra.Command, - *catalog.ViewData, + *catalog.CreateOnlineTableRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq catalog.ViewData + var createReq catalog.CreateOnlineTableRequest var createJson flags.JsonFlag // TODO: short flags diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index 5d0d26736..08e11d686 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -326,6 +326,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include registered models in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get FULL_NAME" cmd.Short = `Get a Registered Model.` cmd.Long = `Get a Registered Model. 
@@ -402,6 +404,7 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.CatalogName, "catalog-name", listReq.CatalogName, `The identifier of the catalog under which to list registered models.`) + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include registered models in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Max number of registered models to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`) cmd.Flags().StringVar(&listReq.SchemaName, "schema-name", listReq.SchemaName, `The identifier of the schema under which to list registered models.`) diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index 6d9d26f5a..710141913 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -218,6 +218,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include schemas in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get FULL_NAME" cmd.Short = `Get a schema.` cmd.Long = `Get a schema. @@ -290,6 +292,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include schemas in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of schemas to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -300,10 +303,8 @@ func newList() *cobra.Command { Gets an array of schemas for a catalog in the metastore. If the caller is the metastore admin or the owner of the parent catalog, all schemas for the catalog will be retrieved. Otherwise, only schemas owned by the caller (or for - which the caller has the **USE_SCHEMA** privilege) will be retrieved. For - unpaginated request, there is no guarantee of a specific ordering of the - elements in the array. For paginated request, elements are ordered by their - name. + which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is + no guarantee of a specific ordering of the elements in the array. Arguments: CATALOG_NAME: Parent catalog for schemas of interest.` diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 981062dfb..f836a2670 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -85,8 +85,7 @@ func newCreateScope() *cobra.Command { cmd.Long = `Create a new secret scope. The scope name must consist of alphanumeric characters, dashes, underscores, - and periods, and may not exceed 128 characters. The maximum number of scopes - in a workspace is 100. + and periods, and may not exceed 128 characters. Arguments: SCOPE: Scope name requested by the user. 
Scope names are unique.` diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 45dff030a..6706b99ea 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -82,9 +82,8 @@ func newBuildLogs() *cobra.Command { // TODO: short flags cmd.Use = "build-logs NAME SERVED_MODEL_NAME" - cmd.Short = `Retrieve the logs associated with building the model's environment for a given serving endpoint's served model.` - cmd.Long = `Retrieve the logs associated with building the model's environment for a given - serving endpoint's served model. + cmd.Short = `Get build logs for a served model.` + cmd.Long = `Get build logs for a served model. Retrieves the build logs associated with the provided served model. @@ -279,8 +278,8 @@ func newExportMetrics() *cobra.Command { // TODO: short flags cmd.Use = "export-metrics NAME" - cmd.Short = `Retrieve the metrics associated with a serving endpoint.` - cmd.Long = `Retrieve the metrics associated with a serving endpoint. + cmd.Short = `Get metrics of a serving endpoint.` + cmd.Long = `Get metrics of a serving endpoint. Retrieves the metrics associated with the provided serving endpoint in either Prometheus or OpenMetrics exposition format. @@ -509,8 +508,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Retrieve all serving endpoints.` - cmd.Long = `Retrieve all serving endpoints.` + cmd.Short = `Get all serving endpoints.` + cmd.Long = `Get all serving endpoints.` cmd.Annotations = make(map[string]string) @@ -551,9 +550,8 @@ func newLogs() *cobra.Command { // TODO: short flags cmd.Use = "logs NAME SERVED_MODEL_NAME" - cmd.Short = `Retrieve the most recent log lines associated with a given serving endpoint's served model.` - cmd.Long = `Retrieve the most recent log lines associated with a given serving endpoint's - served model. + cmd.Short = `Get the latest logs for a served model.` + cmd.Long = `Get the latest logs for a served model. Retrieves the service logs associated with the provided served model. @@ -619,8 +617,8 @@ func newPatch() *cobra.Command { // TODO: array: delete_tags cmd.Use = "patch NAME" - cmd.Short = `Patch the tags of a serving endpoint.` - cmd.Long = `Patch the tags of a serving endpoint. + cmd.Short = `Update tags of a serving endpoint.` + cmd.Long = `Update tags of a serving endpoint. Used to batch add and delete tags from a serving endpoint with a single API call. @@ -689,8 +687,8 @@ func newPut() *cobra.Command { // TODO: array: rate_limits cmd.Use = "put NAME" - cmd.Short = `Update the rate limits of a serving endpoint.` - cmd.Long = `Update the rate limits of a serving endpoint. + cmd.Short = `Update rate limits of a serving endpoint.` + cmd.Long = `Update rate limits of a serving endpoint. Used to update the rate limits of a serving endpoint. NOTE: only external and foundation model endpoints are supported as of now. @@ -771,8 +769,8 @@ func newQuery() *cobra.Command { cmd.Flags().Float64Var(&queryReq.Temperature, "temperature", queryReq.Temperature, `The temperature field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`) cmd.Use = "query NAME" - cmd.Short = `Query a serving endpoint with provided model input.` - cmd.Long = `Query a serving endpoint with provided model input. + cmd.Short = `Query a serving endpoint.` + cmd.Long = `Query a serving endpoint. Arguments: NAME: The name of the serving endpoint. 
This field is required.` @@ -914,8 +912,8 @@ func newUpdateConfig() *cobra.Command { // TODO: complex arg: traffic_config cmd.Use = "update-config NAME" - cmd.Short = `Update a serving endpoint with a new config.` - cmd.Long = `Update a serving endpoint with a new config. + cmd.Short = `Update config of a serving endpoint.` + cmd.Long = `Update config of a serving endpoint. Updates any combination of the serving endpoint's served entities, the compute configuration of those served entities, and the endpoint's traffic config. An diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 8ba0335fb..38e19e839 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -25,9 +25,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add subservices diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 3164baa2b..325945031 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -78,7 +78,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Comment associated with the credential.`) - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Supplying true to this argument skips validation of the created credential.`) @@ -310,9 +310,8 @@ func newList() *cobra.Command { Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore admin, retrieval of - credentials is unrestricted. For unpaginated request, there is no guarantee of - a specific ordering of the elements in the array. For paginated request, - elements are ordered by their name.` + credentials is unrestricted. 
There is no guarantee of a specific ordering of + the elements in the array.` cmd.Annotations = make(map[string]string) @@ -365,7 +364,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) @@ -454,7 +453,7 @@ func newValidate() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().StringVar(&validateReq.ExternalLocationName, "external-location-name", validateReq.ExternalLocationName, `The name of an existing external location to validate.`) cmd.Flags().BoolVar(&validateReq.ReadOnly, "read-only", validateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) cmd.Flags().StringVar(&validateReq.StorageCredentialName, "storage-credential-name", validateReq.StorageCredentialName, `The name of the storage credential to validate.`) diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index 793fb7a2b..4564b4fe6 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -218,6 +218,7 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include tables in the response for which the principal can only access selective metadata for.`) cmd.Flags().BoolVar(&getReq.IncludeDeltaMetadata, "include-delta-metadata", getReq.IncludeDeltaMetadata, `Whether delta metadata should be included in the response.`) cmd.Use = "get FULL_NAME" @@ -296,6 +297,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include tables in the response for which the principal can only access selective metadata for.`) cmd.Flags().BoolVar(&listReq.IncludeDeltaMetadata, "include-delta-metadata", listReq.IncludeDeltaMetadata, `Whether delta metadata should be included in the response.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of tables to return.`) cmd.Flags().BoolVar(&listReq.OmitColumns, "omit-columns", listReq.OmitColumns, `Whether to omit the columns of the table from the response or not.`) diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 4e117e5bd..32e023d44 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -416,6 +416,7 @@ func newQueryIndex() *cobra.Command { cmd.Flags().IntVar(&queryIndexReq.NumResults, "num-results", queryIndexReq.NumResults, `Number of results to return.`) cmd.Flags().StringVar(&queryIndexReq.QueryText, "query-text", queryIndexReq.QueryText, `Query text.`) // TODO: array: 
query_vector + cmd.Flags().Float64Var(&queryIndexReq.ScoreThreshold, "score-threshold", queryIndexReq.ScoreThreshold, `Threshold for the approximate nearest neighbor search.`) cmd.Use = "query-index INDEX_NAME" cmd.Short = `Query an index.` diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 335b7d011..3fc1f447b 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -249,6 +249,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include volumes in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of volumes to return (page length).`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token returned by a previous request.`) @@ -319,6 +320,8 @@ func newRead() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&readReq.IncludeBrowse, "include-browse", readReq.IncludeBrowse, `Whether to include volumes in the response for which the principal can only access selective metadata for.`) + cmd.Use = "read NAME" cmd.Short = `Get a Volume.` cmd.Long = `Get a Volume. diff --git a/go.mod b/go.mod index 832efbc66..4e904fb29 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.34.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.35.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause @@ -54,18 +54,18 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect - go.opentelemetry.io/otel v1.23.0 // indirect - go.opentelemetry.io/otel/metric v1.23.0 // indirect - go.opentelemetry.io/otel/trace v1.23.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/net v0.22.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.166.0 // indirect + google.golang.org/api v0.169.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/grpc v1.61.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect + google.golang.org/grpc v1.62.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 932d480ab..15685fd88 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= 
-github.com/databricks/databricks-sdk-go v0.34.0 h1:z4JjgcCk99jAGxx3JgkMsniJFtReWhtAxkgyvtdFqCs= -github.com/databricks/databricks-sdk-go v0.34.0/go.mod h1:MGNWVPqxYCW1vj/xD7DeLT8uChi4lgTFum+iIwDxd/Q= +github.com/databricks/databricks-sdk-go v0.35.0 h1:Z5dflnYEqCreYtuDkwsCPadvRP/aucikI34+gzrvTYQ= +github.com/databricks/databricks-sdk-go v0.35.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -94,8 +94,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= -github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= @@ -160,16 +160,16 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= -go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= -go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= -go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= -go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= -go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= -go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -243,8 +243,8 @@ golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= -google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= +google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= +google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -252,15 +252,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod 
h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= +google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -272,8 +272,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= From f202596a6fa85451dee82d3ebe98840f8cd05c23 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 21 Mar 2024 11:37:05 +0100 Subject: [PATCH 090/286] Move bundle tests into bundle/tests (#1299) ## Changes These tests were located in `bundle/tests/bundle` which meant they were unable to reuse the helper functions defined in the `bundle/tests` package. There is no need for these tests to live outside the package. ## Tests Existing tests pass. 
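For illustration, a minimal sketch of the kind of shared loader these tests can now reuse from within the `config_tests` package. It mirrors the `loadTarget` function from the deleted `bundle/tests/bundle/loader.go` shown below (the helper actually shared in `bundle/tests` may differ in which mutators it applies):

```go
package config_tests

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/stretchr/testify/require"
)

// loadTarget loads the bundle rooted at path and applies the default mutators
// for the given target, so individual tests don't repeat this boilerplate.
func loadTarget(t *testing.T, path, env string) *bundle.Bundle {
	ctx := context.Background()
	b, err := bundle.Load(ctx, path)
	require.NoError(t, err)
	err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget(env)...))
	require.NoError(t, err)
	return b
}
```

With all tests living in the same package, a helper like this only needs to exist once.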
--- bundle/tests/bundle/loader.go | 26 ------------------ .../pipeline_glob_paths/databricks.yml | 0 .../dlt/nyc_taxi_loader.py | 0 .../{bundle => }/pipeline_glob_paths_test.go | 2 +- .../python_wheel/.gitignore | 0 .../python_wheel/bundle.yml | 0 .../python_wheel/my_test_code/setup.py | 0 .../python_wheel/my_test_code/src/__init__.py | 0 .../python_wheel/my_test_code/src/__main__.py | 0 .../python_wheel_dbfs_lib/bundle.yml | 0 .../python_wheel_no_artifact/.gitignore | 0 .../python_wheel_no_artifact/bundle.yml | 0 .../my_test_code/__init__.py | 0 .../my_test_code/__main__.py | 0 .../python_wheel_no_artifact/setup.py | 0 .../.gitignore | 0 .../bundle.yml | 0 .../my_test_code-0.0.1-py3-none-any.whl | Bin .../wheel_test.go => python_wheel_test.go} | 22 +++++++-------- 19 files changed, 12 insertions(+), 38 deletions(-) delete mode 100644 bundle/tests/bundle/loader.go rename bundle/tests/{bundle => }/pipeline_glob_paths/databricks.yml (100%) rename bundle/tests/{bundle => }/pipeline_glob_paths/dlt/nyc_taxi_loader.py (100%) rename bundle/tests/{bundle => }/pipeline_glob_paths_test.go (98%) rename bundle/tests/{bundle => python_wheel}/python_wheel/.gitignore (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel/bundle.yml (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel/my_test_code/setup.py (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel/my_test_code/src/__init__.py (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel/my_test_code/src/__main__.py (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_dbfs_lib/bundle.yml (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_no_artifact/.gitignore (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_no_artifact/bundle.yml (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_no_artifact/my_test_code/__init__.py (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_no_artifact/my_test_code/__main__.py (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_no_artifact/setup.py (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_no_artifact_no_setup/.gitignore (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_no_artifact_no_setup/bundle.yml (100%) rename bundle/tests/{bundle => python_wheel}/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl (100%) rename bundle/tests/{bundle/wheel_test.go => python_wheel_test.go} (70%) diff --git a/bundle/tests/bundle/loader.go b/bundle/tests/bundle/loader.go deleted file mode 100644 index 52744ca78..000000000 --- a/bundle/tests/bundle/loader.go +++ /dev/null @@ -1,26 +0,0 @@ -package bundle - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" - "github.com/stretchr/testify/require" -) - -func loadTarget(t *testing.T, path, env string) *bundle.Bundle { - ctx := context.Background() - b, err := bundle.Load(ctx, path) - require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget(env)...)) - require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq( - mutator.RewriteSyncPaths(), - mutator.MergeJobClusters(), - mutator.MergeJobTasks(), - mutator.MergePipelineClusters(), - )) - require.NoError(t, err) - return b -} diff --git a/bundle/tests/bundle/pipeline_glob_paths/databricks.yml b/bundle/tests/pipeline_glob_paths/databricks.yml similarity index 100% rename from bundle/tests/bundle/pipeline_glob_paths/databricks.yml rename to 
bundle/tests/pipeline_glob_paths/databricks.yml diff --git a/bundle/tests/bundle/pipeline_glob_paths/dlt/nyc_taxi_loader.py b/bundle/tests/pipeline_glob_paths/dlt/nyc_taxi_loader.py similarity index 100% rename from bundle/tests/bundle/pipeline_glob_paths/dlt/nyc_taxi_loader.py rename to bundle/tests/pipeline_glob_paths/dlt/nyc_taxi_loader.py diff --git a/bundle/tests/bundle/pipeline_glob_paths_test.go b/bundle/tests/pipeline_glob_paths_test.go similarity index 98% rename from bundle/tests/bundle/pipeline_glob_paths_test.go rename to bundle/tests/pipeline_glob_paths_test.go index ed78c9668..85a137926 100644 --- a/bundle/tests/bundle/pipeline_glob_paths_test.go +++ b/bundle/tests/pipeline_glob_paths_test.go @@ -1,4 +1,4 @@ -package bundle +package config_tests import ( "context" diff --git a/bundle/tests/bundle/python_wheel/.gitignore b/bundle/tests/python_wheel/python_wheel/.gitignore similarity index 100% rename from bundle/tests/bundle/python_wheel/.gitignore rename to bundle/tests/python_wheel/python_wheel/.gitignore diff --git a/bundle/tests/bundle/python_wheel/bundle.yml b/bundle/tests/python_wheel/python_wheel/bundle.yml similarity index 100% rename from bundle/tests/bundle/python_wheel/bundle.yml rename to bundle/tests/python_wheel/python_wheel/bundle.yml diff --git a/bundle/tests/bundle/python_wheel/my_test_code/setup.py b/bundle/tests/python_wheel/python_wheel/my_test_code/setup.py similarity index 100% rename from bundle/tests/bundle/python_wheel/my_test_code/setup.py rename to bundle/tests/python_wheel/python_wheel/my_test_code/setup.py diff --git a/bundle/tests/bundle/python_wheel/my_test_code/src/__init__.py b/bundle/tests/python_wheel/python_wheel/my_test_code/src/__init__.py similarity index 100% rename from bundle/tests/bundle/python_wheel/my_test_code/src/__init__.py rename to bundle/tests/python_wheel/python_wheel/my_test_code/src/__init__.py diff --git a/bundle/tests/bundle/python_wheel/my_test_code/src/__main__.py b/bundle/tests/python_wheel/python_wheel/my_test_code/src/__main__.py similarity index 100% rename from bundle/tests/bundle/python_wheel/my_test_code/src/__main__.py rename to bundle/tests/python_wheel/python_wheel/my_test_code/src/__main__.py diff --git a/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml b/bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml similarity index 100% rename from bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml rename to bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml diff --git a/bundle/tests/bundle/python_wheel_no_artifact/.gitignore b/bundle/tests/python_wheel/python_wheel_no_artifact/.gitignore similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/.gitignore rename to bundle/tests/python_wheel/python_wheel_no_artifact/.gitignore diff --git a/bundle/tests/bundle/python_wheel_no_artifact/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/bundle.yml rename to bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml diff --git a/bundle/tests/bundle/python_wheel_no_artifact/my_test_code/__init__.py b/bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__init__.py similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/my_test_code/__init__.py rename to bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__init__.py diff --git a/bundle/tests/bundle/python_wheel_no_artifact/my_test_code/__main__.py 
b/bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__main__.py similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/my_test_code/__main__.py rename to bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__main__.py diff --git a/bundle/tests/bundle/python_wheel_no_artifact/setup.py b/bundle/tests/python_wheel/python_wheel_no_artifact/setup.py similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/setup.py rename to bundle/tests/python_wheel/python_wheel_no_artifact/setup.py diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/.gitignore b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/.gitignore similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact_no_setup/.gitignore rename to bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/.gitignore diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact_no_setup/bundle.yml rename to bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl rename to bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl diff --git a/bundle/tests/bundle/wheel_test.go b/bundle/tests/python_wheel_test.go similarity index 70% rename from bundle/tests/bundle/wheel_test.go rename to bundle/tests/python_wheel_test.go index 5171241f4..8351e96ae 100644 --- a/bundle/tests/bundle/wheel_test.go +++ b/bundle/tests/python_wheel_test.go @@ -1,4 +1,4 @@ -package bundle +package config_tests import ( "context" @@ -11,16 +11,16 @@ import ( "github.com/stretchr/testify/require" ) -func TestBundlePythonWheelBuild(t *testing.T) { +func TestPythonWheelBuild(t *testing.T) { ctx := context.Background() - b, err := bundle.Load(ctx, "./python_wheel") + b, err := bundle.Load(ctx, "./python_wheel/python_wheel") require.NoError(t, err) m := phases.Build() err = bundle.Apply(ctx, b, m) require.NoError(t, err) - matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") + matches, err := filepath.Glob("./python_wheel/python_wheel/my_test_code/dist/my_test_code-*.whl") require.NoError(t, err) require.Equal(t, 1, len(matches)) @@ -29,16 +29,16 @@ func TestBundlePythonWheelBuild(t *testing.T) { require.NoError(t, err) } -func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { +func TestPythonWheelBuildAutoDetect(t *testing.T) { ctx := context.Background() - b, err := bundle.Load(ctx, "./python_wheel_no_artifact") + b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact") require.NoError(t, err) m := phases.Build() err = bundle.Apply(ctx, b, m) require.NoError(t, err) - matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") + matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact/dist/my_test_code-*.whl") require.NoError(t, err) require.Equal(t, 1, len(matches)) @@ -47,9 +47,9 @@ func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { require.NoError(t, err) } -func TestBundlePythonWheelWithDBFSLib(t 
*testing.T) { +func TestPythonWheelWithDBFSLib(t *testing.T) { ctx := context.Background() - b, err := bundle.Load(ctx, "./python_wheel_dbfs_lib") + b, err := bundle.Load(ctx, "./python_wheel/python_wheel_dbfs_lib") require.NoError(t, err) m := phases.Build() @@ -61,9 +61,9 @@ func TestBundlePythonWheelWithDBFSLib(t *testing.T) { require.NoError(t, err) } -func TestBundlePythonWheelBuildNoBuildJustUpload(t *testing.T) { +func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { ctx := context.Background() - b, err := bundle.Load(ctx, "./python_wheel_no_artifact_no_setup") + b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact_no_setup") require.NoError(t, err) m := phases.Build() From fd8dbff63104c0830d89f372eaf01ebcd48aa343 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 22 Mar 2024 14:15:54 +0100 Subject: [PATCH 091/286] Update Go SDK to v0.36.0 (#1304) ## Changes SDK release: https://github.com/databricks/databricks-sdk-go/releases/tag/v0.36.0 No notable differences other than a few type name changes. ## Tests Tests pass. --- .codegen/_openapi_sha | 2 +- bundle/schema/docs/bundle_descriptions.json | 28 +++++++++---------- .../csp-enablement-account.go | 4 +-- .../esm-enablement-account.go | 4 +-- .../personal-compute/personal-compute.go | 8 +++--- .../automatic-cluster-update.go | 4 +-- .../csp-enablement/csp-enablement.go | 4 +-- .../default-namespace/default-namespace.go | 8 +++--- .../esm-enablement/esm-enablement.go | 4 +-- .../ip-access-lists/ip-access-lists.go | 4 +-- cmd/workspace/lakeview/lakeview.go | 12 ++++---- .../restrict-workspace-admins.go | 8 +++--- go.mod | 2 +- go.sum | 4 +-- 14 files changed, 48 insertions(+), 48 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 499e0da40..f26f23179 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -3821dc51952c5cf1c276dd84967da011b191e64a \ No newline at end of file +93763b0d7ae908520c229c786fff28b8fd623261 \ No newline at end of file diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 53b9be532..c6b45a3eb 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -193,7 +193,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." } } }, @@ -725,7 +725,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -785,7 +785,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the Python file. 
When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -1269,7 +1269,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -1371,7 +1371,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the project directory. 
When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -1449,7 +1449,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -1551,7 +1551,7 @@ } }, "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." }, "table": { "description": "Table trigger settings.", @@ -2726,7 +2726,7 @@ "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." } } }, @@ -3258,7 +3258,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" @@ -3318,7 +3318,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). 
When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3802,7 +3802,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -3904,7 +3904,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. 
When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -3982,7 +3982,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" } } }, @@ -4084,7 +4084,7 @@ } }, "pause_status": { - "description": "Indicate whether this schedule is paused or not." + "description": "Whether this trigger is paused or not." }, "table": { "description": "Table trigger settings.", diff --git a/cmd/account/csp-enablement-account/csp-enablement-account.go b/cmd/account/csp-enablement-account/csp-enablement-account.go index 5c7b9b926..79819003b 100755 --- a/cmd/account/csp-enablement-account/csp-enablement-account.go +++ b/cmd/account/csp-enablement-account/csp-enablement-account.go @@ -47,13 +47,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetCspEnablementAccountRequest, + *settings.GetCspEnablementAccountSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetCspEnablementAccountRequest + var getReq settings.GetCspEnablementAccountSettingRequest // TODO: short flags diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go index 0c936c4de..dd407e2e5 100755 --- a/cmd/account/esm-enablement-account/esm-enablement-account.go +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -45,13 +45,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var getOverrides []func( *cobra.Command, - *settings.GetEsmEnablementAccountRequest, + *settings.GetEsmEnablementAccountSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetEsmEnablementAccountRequest + var getReq settings.GetEsmEnablementAccountSettingRequest // TODO: short flags diff --git a/cmd/account/personal-compute/personal-compute.go b/cmd/account/personal-compute/personal-compute.go index 7a2a04525..2a14b0b33 100755 --- a/cmd/account/personal-compute/personal-compute.go +++ b/cmd/account/personal-compute/personal-compute.go @@ -53,13 +53,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deleteOverrides []func( *cobra.Command, - *settings.DeletePersonalComputeRequest, + *settings.DeletePersonalComputeSettingRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq settings.DeletePersonalComputeRequest + var deleteReq settings.DeletePersonalComputeSettingRequest // TODO: short flags @@ -108,13 +108,13 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetPersonalComputeRequest, + *settings.GetPersonalComputeSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetPersonalComputeRequest + var getReq settings.GetPersonalComputeSettingRequest // TODO: short flags diff --git a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go index 4e198eb46..2385195bb 100755 --- a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go +++ b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go @@ -42,13 +42,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetAutomaticClusterUpdateRequest, + *settings.GetAutomaticClusterUpdateSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetAutomaticClusterUpdateRequest + var getReq settings.GetAutomaticClusterUpdateSettingRequest // TODO: short flags diff --git a/cmd/workspace/csp-enablement/csp-enablement.go b/cmd/workspace/csp-enablement/csp-enablement.go index 623a7e541..312591564 100755 --- a/cmd/workspace/csp-enablement/csp-enablement.go +++ b/cmd/workspace/csp-enablement/csp-enablement.go @@ -45,13 +45,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetCspEnablementRequest, + *settings.GetCspEnablementSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetCspEnablementRequest + var getReq settings.GetCspEnablementSettingRequest // TODO: short flags diff --git a/cmd/workspace/default-namespace/default-namespace.go b/cmd/workspace/default-namespace/default-namespace.go index 89c11d7cd..b15907bec 100755 --- a/cmd/workspace/default-namespace/default-namespace.go +++ b/cmd/workspace/default-namespace/default-namespace.go @@ -53,13 +53,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var deleteOverrides []func( *cobra.Command, - *settings.DeleteDefaultNamespaceRequest, + *settings.DeleteDefaultNamespaceSettingRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq settings.DeleteDefaultNamespaceRequest + var deleteReq settings.DeleteDefaultNamespaceSettingRequest // TODO: short flags @@ -112,13 +112,13 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetDefaultNamespaceRequest, + *settings.GetDefaultNamespaceSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetDefaultNamespaceRequest + var getReq settings.GetDefaultNamespaceSettingRequest // TODO: short flags diff --git a/cmd/workspace/esm-enablement/esm-enablement.go b/cmd/workspace/esm-enablement/esm-enablement.go index be0eed2f8..a65fe2f76 100755 --- a/cmd/workspace/esm-enablement/esm-enablement.go +++ b/cmd/workspace/esm-enablement/esm-enablement.go @@ -47,13 +47,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetEsmEnablementRequest, + *settings.GetEsmEnablementSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetEsmEnablementRequest + var getReq settings.GetEsmEnablementSettingRequest // TODO: short flags diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index ec5958b5b..ec8be99f6 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -243,13 +243,13 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetIpAccessListRequest, + *settings.GetIpAccessList, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetIpAccessListRequest + var getReq settings.GetIpAccessList // TODO: short flags diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 4fc7404a6..8481a6a8c 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -130,13 +130,13 @@ func newCreate() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *dashboards.GetLakeviewRequest, + *dashboards.GetDashboardRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq dashboards.GetLakeviewRequest + var getReq dashboards.GetDashboardRequest // TODO: short flags @@ -188,13 +188,13 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getPublishedOverrides []func( *cobra.Command, - *dashboards.GetPublishedRequest, + *dashboards.GetPublishedDashboardRequest, ) func newGetPublished() *cobra.Command { cmd := &cobra.Command{} - var getPublishedReq dashboards.GetPublishedRequest + var getPublishedReq dashboards.GetPublishedDashboardRequest // TODO: short flags @@ -315,13 +315,13 @@ func newPublish() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var trashOverrides []func( *cobra.Command, - *dashboards.TrashRequest, + *dashboards.TrashDashboardRequest, ) func newTrash() *cobra.Command { cmd := &cobra.Command{} - var trashReq dashboards.TrashRequest + var trashReq dashboards.TrashDashboardRequest // TODO: short flags diff --git a/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go index e0ca8030f..5e9f59d2c 100755 --- a/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go +++ b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go @@ -53,13 +53,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deleteOverrides []func( *cobra.Command, - *settings.DeleteRestrictWorkspaceAdminRequest, + *settings.DeleteRestrictWorkspaceAdminsSettingRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq settings.DeleteRestrictWorkspaceAdminRequest + var deleteReq settings.DeleteRestrictWorkspaceAdminsSettingRequest // TODO: short flags @@ -112,13 +112,13 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetRestrictWorkspaceAdminRequest, + *settings.GetRestrictWorkspaceAdminsSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetRestrictWorkspaceAdminRequest + var getReq settings.GetRestrictWorkspaceAdminsSettingRequest // TODO: short flags diff --git a/go.mod b/go.mod index 4e904fb29..d9e6c24f0 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.35.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.36.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 15685fd88..a4a6eb40b 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.35.0 h1:Z5dflnYEqCreYtuDkwsCPadvRP/aucikI34+gzrvTYQ= -github.com/databricks/databricks-sdk-go v0.35.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= +github.com/databricks/databricks-sdk-go v0.36.0 h1:QOO9VxBh6JmzzPpCHh0h1f4Ijk+Y3mqBtNN1nzp2Nq8= +github.com/databricks/databricks-sdk-go v0.36.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 1b879d44e120d7a27d0fc9ed5420cb216f68dcc1 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 25 Mar 2024 10:17:52 +0100 Subject: [PATCH 092/286] Upgrade Terraform provider to 1.38.0 (#1308) ## Changes Update to the latest release. No schema changes. ## Tests Unit tests pass. Integration to be done as part of the release PR. 
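Note: the provider version is pinned in two files (the codegen schema constant and the generated root config), as the diff below shows. A rough sketch of a consistency check that could guard the pin; the test name, import aliases, and the assumption that `json.Marshal(NewRoot())` produces the rendered Terraform JSON are illustrative, not taken from the repository:

```go
package schema_test

import (
	"encoding/json"
	"fmt"
	"testing"

	codegen "github.com/databricks/cli/bundle/internal/tf/codegen/schema"
	tfschema "github.com/databricks/cli/bundle/internal/tf/schema"
	"github.com/stretchr/testify/require"
)

// Illustrative only: assumes the marshaled root config includes the
// required_providers block, so it must contain the same version string
// as the codegen constant.
func TestProviderVersionPinnedConsistently(t *testing.T) {
	buf, err := json.Marshal(tfschema.NewRoot())
	require.NoError(t, err)
	require.Contains(t, string(buf), fmt.Sprintf("%q", codegen.ProviderVersion))
}
```

A check of this shape keeps a provider bump to two intentional, matching string changes.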
--- bundle/internal/tf/codegen/schema/version.go | 2 +- bundle/internal/tf/schema/root.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index a41b62257..363ad4e8a 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.37.0" +const ProviderVersion = "1.38.0" diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index f0253c285..118e2857d 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -25,7 +25,7 @@ func NewRoot() *Root { "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ "source": "databricks/databricks", - "version": "1.37.0", + "version": "1.38.0", }, }, }, From 1efebabbf9dbf9650edb7838afffea64306d6c0e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 25 Mar 2024 11:43:16 +0100 Subject: [PATCH 093/286] Release v0.216.0 (#1309) CLI: * Propagate correct `User-Agent` for CLI during OAuth flow ([#1264](https://github.com/databricks/cli/pull/1264)). * Add usage string when command fails with incorrect arguments ([#1276](https://github.com/databricks/cli/pull/1276)). Bundles: * Include `dyn.Path` as argument to the visit callback function ([#1260](https://github.com/databricks/cli/pull/1260)). * Inline logic to set a value in `dyn.SetByPath` ([#1261](https://github.com/databricks/cli/pull/1261)). * Add assertions for the `dyn.Path` argument to the visit callback ([#1265](https://github.com/databricks/cli/pull/1265)). * Add `dyn.MapByPattern` to map a function to values with matching paths ([#1266](https://github.com/databricks/cli/pull/1266)). * Filter current user from resource permissions ([#1262](https://github.com/databricks/cli/pull/1262)). * Retain location annotation when expanding globs for pipeline libraries ([#1274](https://github.com/databricks/cli/pull/1274)). * Added deployment state for bundles ([#1267](https://github.com/databricks/cli/pull/1267)). * Do CheckRunningResource only after terraform.Write ([#1292](https://github.com/databricks/cli/pull/1292)). * Rewrite relative paths using `dyn.Location` of the underlying value ([#1273](https://github.com/databricks/cli/pull/1273)). * Push deployment state right after files upload ([#1293](https://github.com/databricks/cli/pull/1293)). * Make `Append` function to `dyn.Path` return independent slice ([#1295](https://github.com/databricks/cli/pull/1295)). * Move bundle tests into bundle/tests ([#1299](https://github.com/databricks/cli/pull/1299)). * Upgrade Terraform provider to 1.38.0 ([#1308](https://github.com/databricks/cli/pull/1308)). Internal: * Add integration test for mlops-stacks initialization ([#1155](https://github.com/databricks/cli/pull/1155)). * Update actions/setup-python to v5 ([#1290](https://github.com/databricks/cli/pull/1290)). * Update codecov/codecov-action to v4 ([#1291](https://github.com/databricks/cli/pull/1291)). API Changes: * Changed `databricks catalogs list` command. * Changed `databricks online-tables create` command. * Changed `databricks lakeview publish` command. * Added `databricks lakeview create` command. * Added `databricks lakeview get` command. * Added `databricks lakeview get-published` command. * Added `databricks lakeview trash` command. * Added `databricks lakeview update` command. 
* Moved settings related commands to `databricks settings` and `databricks account settings`. OpenAPI commit 93763b0d7ae908520c229c786fff28b8fd623261 (2024-03-20) Dependency updates: * Bump golang.org/x/oauth2 from 0.17.0 to 0.18.0 ([#1270](https://github.com/databricks/cli/pull/1270)). * Bump golang.org/x/mod from 0.15.0 to 0.16.0 ([#1271](https://github.com/databricks/cli/pull/1271)). * Update Go SDK to v0.35.0 ([#1300](https://github.com/databricks/cli/pull/1300)). * Update Go SDK to v0.36.0 ([#1304](https://github.com/databricks/cli/pull/1304)). --- CHANGELOG.md | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 51c601150..52d7590f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,50 @@ # Version changelog +## 0.216.0 + +CLI: + * Propagate correct `User-Agent` for CLI during OAuth flow ([#1264](https://github.com/databricks/cli/pull/1264)). + * Add usage string when command fails with incorrect arguments ([#1276](https://github.com/databricks/cli/pull/1276)). + +Bundles: + * Include `dyn.Path` as argument to the visit callback function ([#1260](https://github.com/databricks/cli/pull/1260)). + * Inline logic to set a value in `dyn.SetByPath` ([#1261](https://github.com/databricks/cli/pull/1261)). + * Add assertions for the `dyn.Path` argument to the visit callback ([#1265](https://github.com/databricks/cli/pull/1265)). + * Add `dyn.MapByPattern` to map a function to values with matching paths ([#1266](https://github.com/databricks/cli/pull/1266)). + * Filter current user from resource permissions ([#1262](https://github.com/databricks/cli/pull/1262)). + * Retain location annotation when expanding globs for pipeline libraries ([#1274](https://github.com/databricks/cli/pull/1274)). + * Added deployment state for bundles ([#1267](https://github.com/databricks/cli/pull/1267)). + * Do CheckRunningResource only after terraform.Write ([#1292](https://github.com/databricks/cli/pull/1292)). + * Rewrite relative paths using `dyn.Location` of the underlying value ([#1273](https://github.com/databricks/cli/pull/1273)). + * Push deployment state right after files upload ([#1293](https://github.com/databricks/cli/pull/1293)). + * Make `Append` function to `dyn.Path` return independent slice ([#1295](https://github.com/databricks/cli/pull/1295)). + * Move bundle tests into bundle/tests ([#1299](https://github.com/databricks/cli/pull/1299)). + * Upgrade Terraform provider to 1.38.0 ([#1308](https://github.com/databricks/cli/pull/1308)). + +Internal: + * Add integration test for mlops-stacks initialization ([#1155](https://github.com/databricks/cli/pull/1155)). + * Update actions/setup-python to v5 ([#1290](https://github.com/databricks/cli/pull/1290)). + * Update codecov/codecov-action to v4 ([#1291](https://github.com/databricks/cli/pull/1291)). + +API Changes: + * Changed `databricks catalogs list` command. + * Changed `databricks online-tables create` command. + * Changed `databricks lakeview publish` command. + * Added `databricks lakeview create` command. + * Added `databricks lakeview get` command. + * Added `databricks lakeview get-published` command. + * Added `databricks lakeview trash` command. + * Added `databricks lakeview update` command. + * Moved settings related commands to `databricks settings` and `databricks account settings`. 
+ +OpenAPI commit 93763b0d7ae908520c229c786fff28b8fd623261 (2024-03-20) + +Dependency updates: + * Bump golang.org/x/oauth2 from 0.17.0 to 0.18.0 ([#1270](https://github.com/databricks/cli/pull/1270)). + * Bump golang.org/x/mod from 0.15.0 to 0.16.0 ([#1271](https://github.com/databricks/cli/pull/1271)). + * Update Go SDK to v0.35.0 ([#1300](https://github.com/databricks/cli/pull/1300)). + * Update Go SDK to v0.36.0 ([#1304](https://github.com/databricks/cli/pull/1304)). + ## 0.215.0 CLI: From 26094f01a0e06dd6b7f00710ee7a3623f9c09a38 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 25 Mar 2024 12:01:09 +0100 Subject: [PATCH 094/286] Define `dyn.Mapping` to represent maps (#1301) ## Changes Before this change maps were stored as a regular Go map with string keys. This didn't let us capture metadata (location information) for map keys. To address this, this change replaces the use of the regular Go map with a dedicated type for a dynamic map. This type stores the `dyn.Value` for both the key and the value. It uses a map to still allow O(1) lookups and redirects those into a slice. ## Tests * All existing unit tests pass (some with minor modifications due to interface change). * Equality assertions with `assert.Equal` no longer worked because the new `dyn.Mapping` persists the order in which keys are set and is therefore susceptible to map ordering issues. To fix this, I added a `dynassert` package that forwards all assertions to `testify/assert` but intercepts equality for `dyn.Value` arguments. --- libs/dyn/convert/end_to_end_test.go | 2 +- libs/dyn/convert/from_typed.go | 33 +++- libs/dyn/convert/from_typed_test.go | 2 +- libs/dyn/convert/normalize.go | 35 ++-- libs/dyn/convert/normalize_test.go | 2 +- libs/dyn/convert/struct_info_test.go | 2 +- libs/dyn/convert/to_typed.go | 19 ++- libs/dyn/convert/to_typed_test.go | 2 +- libs/dyn/dynassert/assert.go | 113 +++++++++++++ libs/dyn/dynassert/assert_test.go | 45 ++++++ libs/dyn/dynvar/lookup_test.go | 2 +- libs/dyn/dynvar/ref_test.go | 2 +- libs/dyn/dynvar/resolve_test.go | 2 +- libs/dyn/kind.go | 2 +- libs/dyn/kind_test.go | 2 +- libs/dyn/location_test.go | 2 +- libs/dyn/mapping.go | 148 +++++++++++++++++ libs/dyn/mapping_test.go | 204 ++++++++++++++++++++++++ libs/dyn/merge/elements_by_key_test.go | 2 +- libs/dyn/merge/merge.go | 18 +-- libs/dyn/merge/merge_test.go | 2 +- libs/dyn/path_string_test.go | 2 +- libs/dyn/path_test.go | 2 +- libs/dyn/pattern.go | 13 +- libs/dyn/pattern_test.go | 2 +- libs/dyn/value.go | 24 +-- libs/dyn/value_test.go | 12 +- libs/dyn/value_underlying.go | 10 +- libs/dyn/value_underlying_test.go | 2 +- libs/dyn/visit.go | 7 +- libs/dyn/visit_get_test.go | 2 +- libs/dyn/visit_map.go | 11 +- libs/dyn/visit_map_test.go | 2 +- libs/dyn/visit_set.go | 5 +- libs/dyn/visit_set_test.go | 2 +- libs/dyn/walk.go | 10 +- libs/dyn/walk_test.go | 2 +- libs/dyn/yamlloader/loader.go | 17 +- libs/dyn/yamlloader/yaml_anchor_test.go | 2 +- libs/dyn/yamlloader/yaml_error_test.go | 2 +- libs/dyn/yamlloader/yaml_mix_test.go | 2 +- libs/dyn/yamlloader/yaml_test.go | 2 +- libs/dyn/yamlsaver/order_test.go | 2 +- libs/dyn/yamlsaver/saver.go | 19 +-- libs/dyn/yamlsaver/saver_test.go | 2 +- libs/dyn/yamlsaver/utils.go | 4 +- libs/dyn/yamlsaver/utils_test.go | 6 +- 47 files changed, 680 insertions(+), 127 deletions(-) create mode 100644 libs/dyn/dynassert/assert.go create mode 100644 libs/dyn/dynassert/assert_test.go create mode 100644 libs/dyn/mapping.go create mode 100644 libs/dyn/mapping_test.go diff --git 
a/libs/dyn/convert/end_to_end_test.go b/libs/dyn/convert/end_to_end_test.go index 7c048136e..33902bea8 100644 --- a/libs/dyn/convert/end_to_end_test.go +++ b/libs/dyn/convert/end_to_end_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index 4778edb96..c344d12df 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -71,17 +71,28 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } - out := make(map[string]dyn.Value) + refm, _ := ref.AsMap() + out := dyn.NewMapping() info := getStructInfo(src.Type()) for k, v := range info.FieldValues(src) { + pair, ok := refm.GetPairByString(k) + refk := pair.Key + refv := pair.Value + + // Use nil reference if there is no reference for this key + if !ok { + refk = dyn.V(k) + refv = dyn.NilValue + } + // Convert the field taking into account the reference value (may be equal to config.NilValue). - nv, err := fromTyped(v.Interface(), ref.Get(k)) + nv, err := fromTyped(v.Interface(), refv) if err != nil { return dyn.InvalidValue, err } if nv != dyn.NilValue { - out[k] = nv + out.Set(refk, nv) } } @@ -101,21 +112,31 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { return dyn.NilValue, nil } - out := make(map[string]dyn.Value) + refm, _ := ref.AsMap() + out := dyn.NewMapping() iter := src.MapRange() for iter.Next() { k := iter.Key().String() v := iter.Value() + pair, ok := refm.GetPairByString(k) + refk := pair.Key + refv := pair.Value + + // Use nil reference if there is no reference for this key + if !ok { + refk = dyn.V(k) + refv = dyn.NilValue + } // Convert entry taking into account the reference value (may be equal to dyn.NilValue). - nv, err := fromTyped(v.Interface(), ref.Get(k), includeZeroValues) + nv, err := fromTyped(v.Interface(), refv, includeZeroValues) if err != nil { return dyn.InvalidValue, err } // Every entry is represented, even if it is a nil. // Otherwise, a map with zero-valued structs would yield a nil as well. 
- out[k] = nv + out.Set(refk, nv) } return dyn.NewValue(out, ref.Location()), nil diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index f7e97fc7e..f75470f42 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index d6539be95..f18b27fd2 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -74,30 +74,32 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen switch src.Kind() { case dyn.KindMap: - out := make(map[string]dyn.Value) + out := dyn.NewMapping() info := getStructInfo(typ) - for k, v := range src.MustMap() { - index, ok := info.Fields[k] + for _, pair := range src.MustMap().Pairs() { + pk := pair.Key + pv := pair.Value + index, ok := info.Fields[pk.MustString()] if !ok { diags = diags.Append(diag.Diagnostic{ Severity: diag.Warning, - Summary: fmt.Sprintf("unknown field: %s", k), - Location: src.Location(), + Summary: fmt.Sprintf("unknown field: %s", pk.MustString()), + Location: pk.Location(), }) continue } // Normalize the value according to the field type. - v, err := n.normalizeType(typ.FieldByIndex(index).Type, v, seen) + nv, err := n.normalizeType(typ.FieldByIndex(index).Type, pv, seen) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. - if !v.IsValid() { + if !nv.IsValid() { continue } } - out[k] = v + out.Set(pk, nv) } // Return the normalized value if missing fields are not included. @@ -107,7 +109,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen // Populate missing fields with their zero values. for k, index := range info.Fields { - if _, ok := out[k]; ok { + if _, ok := out.GetByString(k); ok { continue } @@ -143,7 +145,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen continue } if v.IsValid() { - out[k] = v + out.Set(dyn.V(k), v) } } @@ -160,19 +162,22 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r switch src.Kind() { case dyn.KindMap: - out := make(map[string]dyn.Value) - for k, v := range src.MustMap() { + out := dyn.NewMapping() + for _, pair := range src.MustMap().Pairs() { + pk := pair.Key + pv := pair.Value + // Normalize the value according to the map element type. - v, err := n.normalizeType(typ.Elem(), v, seen) + nv, err := n.normalizeType(typ.Elem(), pv, seen) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. 
- if !v.IsValid() { + if !nv.IsValid() { continue } } - out[k] = v + out.Set(pk, nv) } return dyn.NewValue(out, src.Location()), diags diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index a2a6038e4..78c487d3f 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -5,7 +5,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestNormalizeStruct(t *testing.T) { diff --git a/libs/dyn/convert/struct_info_test.go b/libs/dyn/convert/struct_info_test.go index 08be3c47e..20348ff60 100644 --- a/libs/dyn/convert/struct_info_test.go +++ b/libs/dyn/convert/struct_info_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestStructInfoPlain(t *testing.T) { diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index 8b3cf3bb8..f10853a2e 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -59,8 +59,11 @@ func toTypedStruct(dst reflect.Value, src dyn.Value) error { dst.SetZero() info := getStructInfo(dst.Type()) - for k, v := range src.MustMap() { - index, ok := info.Fields[k] + for _, pair := range src.MustMap().Pairs() { + pk := pair.Key + pv := pair.Value + + index, ok := info.Fields[pk.MustString()] if !ok { // Ignore unknown fields. // A warning will be printed later. See PR #904. @@ -82,7 +85,7 @@ func toTypedStruct(dst reflect.Value, src dyn.Value) error { f = f.Field(x) } - err := ToTyped(f.Addr().Interface(), v) + err := ToTyped(f.Addr().Interface(), pv) if err != nil { return err } @@ -112,12 +115,14 @@ func toTypedMap(dst reflect.Value, src dyn.Value) error { m := src.MustMap() // Always overwrite. - dst.Set(reflect.MakeMapWithSize(dst.Type(), len(m))) - for k, v := range m { - kv := reflect.ValueOf(k) + dst.Set(reflect.MakeMapWithSize(dst.Type(), m.Len())) + for _, pair := range m.Pairs() { + pk := pair.Key + pv := pair.Value + kv := reflect.ValueOf(pk.MustString()) kt := dst.Type().Key() vv := reflect.New(dst.Type().Elem()) - err := ToTyped(vv.Interface(), v) + err := ToTyped(vv.Interface(), pv) if err != nil { return err } diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index a3c340e81..56d98a3cf 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) diff --git a/libs/dyn/dynassert/assert.go b/libs/dyn/dynassert/assert.go new file mode 100644 index 000000000..dc6676ca2 --- /dev/null +++ b/libs/dyn/dynassert/assert.go @@ -0,0 +1,113 @@ +package dynassert + +import ( + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/assert" +) + +func Equal(t assert.TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + ev, eok := expected.(dyn.Value) + av, aok := actual.(dyn.Value) + if eok && aok && ev.IsValid() && av.IsValid() { + if !assert.Equal(t, ev.AsAny(), av.AsAny(), msgAndArgs...) { + return false + } + + // The values are equal on contents. Now compare the locations. + if !assert.Equal(t, ev.Location(), av.Location(), msgAndArgs...) 
{ + return false + } + + // Walk ev and av and compare the locations of each element. + _, err := dyn.Walk(ev, func(p dyn.Path, evv dyn.Value) (dyn.Value, error) { + avv, err := dyn.GetByPath(av, p) + if assert.NoError(t, err, "unable to get value from actual value at path %v", p.String()) { + assert.Equal(t, evv.Location(), avv.Location()) + } + return evv, nil + }) + return assert.NoError(t, err) + } + + return assert.Equal(t, expected, actual, msgAndArgs...) +} + +func EqualValues(t assert.TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + return assert.EqualValues(t, expected, actual, msgAndArgs...) +} + +func NotEqual(t assert.TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return assert.NotEqual(t, expected, actual, msgAndArgs...) +} + +func Len(t assert.TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + return assert.Len(t, object, length, msgAndArgs...) +} + +func Empty(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return assert.Empty(t, object, msgAndArgs...) +} + +func Nil(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return assert.Nil(t, object, msgAndArgs...) +} + +func NotNil(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return assert.NotNil(t, object, msgAndArgs...) +} + +func NoError(t assert.TestingT, err error, msgAndArgs ...interface{}) bool { + return assert.NoError(t, err, msgAndArgs...) +} + +func Error(t assert.TestingT, err error, msgAndArgs ...interface{}) bool { + return assert.Error(t, err, msgAndArgs...) +} + +func EqualError(t assert.TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + return assert.EqualError(t, theError, errString, msgAndArgs...) +} + +func ErrorContains(t assert.TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + return assert.ErrorContains(t, theError, contains, msgAndArgs...) +} + +func ErrorIs(t assert.TestingT, theError, target error, msgAndArgs ...interface{}) bool { + return assert.ErrorIs(t, theError, target, msgAndArgs...) +} + +func True(t assert.TestingT, value bool, msgAndArgs ...interface{}) bool { + return assert.True(t, value, msgAndArgs...) +} + +func False(t assert.TestingT, value bool, msgAndArgs ...interface{}) bool { + return assert.False(t, value, msgAndArgs...) +} + +func Contains(t assert.TestingT, list interface{}, element interface{}, msgAndArgs ...interface{}) bool { + return assert.Contains(t, list, element, msgAndArgs...) +} + +func NotContains(t assert.TestingT, list interface{}, element interface{}, msgAndArgs ...interface{}) bool { + return assert.NotContains(t, list, element, msgAndArgs...) +} + +func ElementsMatch(t assert.TestingT, listA, listB interface{}, msgAndArgs ...interface{}) bool { + return assert.ElementsMatch(t, listA, listB, msgAndArgs...) +} + +func Panics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool { + return assert.Panics(t, f, msgAndArgs...) +} + +func PanicsWithValue(t assert.TestingT, expected interface{}, f func(), msgAndArgs ...interface{}) bool { + return assert.PanicsWithValue(t, expected, f, msgAndArgs...) +} + +func PanicsWithError(t assert.TestingT, errString string, f func(), msgAndArgs ...interface{}) bool { + return assert.PanicsWithError(t, errString, f, msgAndArgs...) +} + +func NotPanics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool { + return assert.NotPanics(t, f, msgAndArgs...) 
+} diff --git a/libs/dyn/dynassert/assert_test.go b/libs/dyn/dynassert/assert_test.go new file mode 100644 index 000000000..43258bd20 --- /dev/null +++ b/libs/dyn/dynassert/assert_test.go @@ -0,0 +1,45 @@ +package dynassert + +import ( + "go/parser" + "go/token" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestThatThisTestPackageIsUsed(t *testing.T) { + var base = ".." + var files []string + err := fs.WalkDir(os.DirFS(base), ".", func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { + // Filter this directory. + if filepath.Base(path) == "dynassert" { + return fs.SkipDir + } + } + if ok, _ := filepath.Match("*_test.go", d.Name()); ok { + files = append(files, filepath.Join(base, path)) + } + return nil + }) + require.NoError(t, err) + + // Confirm that none of the test files under `libs/dyn` import the + // `testify/assert` package and instead import this package for asserts. + fset := token.NewFileSet() + for _, file := range files { + f, err := parser.ParseFile(fset, file, nil, parser.ParseComments) + require.NoError(t, err) + + for _, imp := range f.Imports { + if strings.Contains(imp.Path.Value, `github.com/stretchr/testify/assert`) { + t.Errorf("File %s should not import github.com/stretchr/testify/assert", file) + } + } + } +} diff --git a/libs/dyn/dynvar/lookup_test.go b/libs/dyn/dynvar/lookup_test.go index 2341d7208..b78115ee8 100644 --- a/libs/dyn/dynvar/lookup_test.go +++ b/libs/dyn/dynvar/lookup_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/databricks/cli/libs/dyn/dynvar" - "github.com/stretchr/testify/assert" ) func TestDefaultLookup(t *testing.T) { diff --git a/libs/dyn/dynvar/ref_test.go b/libs/dyn/dynvar/ref_test.go index 092237368..aff3643e0 100644 --- a/libs/dyn/dynvar/ref_test.go +++ b/libs/dyn/dynvar/ref_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) diff --git a/libs/dyn/dynvar/resolve_test.go b/libs/dyn/dynvar/resolve_test.go index 304ed9391..bbecbb776 100644 --- a/libs/dyn/dynvar/resolve_test.go +++ b/libs/dyn/dynvar/resolve_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/databricks/cli/libs/dyn/dynvar" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/libs/dyn/kind.go b/libs/dyn/kind.go index 8f51c25c6..9d507fbc5 100644 --- a/libs/dyn/kind.go +++ b/libs/dyn/kind.go @@ -22,7 +22,7 @@ const ( func kindOf(v any) Kind { switch v.(type) { - case map[string]Value: + case Mapping: return KindMap case []Value: return KindSequence diff --git a/libs/dyn/kind_test.go b/libs/dyn/kind_test.go index 84c90713f..9889d31e1 100644 --- a/libs/dyn/kind_test.go +++ b/libs/dyn/kind_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestKindZeroValue(t *testing.T) { diff --git a/libs/dyn/location_test.go b/libs/dyn/location_test.go index 6d856410b..e11f7cb56 100644 --- a/libs/dyn/location_test.go +++ b/libs/dyn/location_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert 
"github.com/databricks/cli/libs/dyn/dynassert" ) func TestLocation(t *testing.T) { diff --git a/libs/dyn/mapping.go b/libs/dyn/mapping.go new file mode 100644 index 000000000..668f57ecc --- /dev/null +++ b/libs/dyn/mapping.go @@ -0,0 +1,148 @@ +package dyn + +import ( + "fmt" + "maps" + "slices" +) + +// Pair represents a single key-value pair in a Mapping. +type Pair struct { + Key Value + Value Value +} + +// Mapping represents a key-value map of dynamic values. +// It exists because plain Go maps cannot use dynamic values for keys. +// We need to use dynamic values for keys because it lets us associate metadata +// with keys (i.e. their definition location). Keys must be strings. +type Mapping struct { + pairs []Pair + index map[string]int +} + +// NewMapping creates a new empty Mapping. +func NewMapping() Mapping { + return Mapping{ + pairs: make([]Pair, 0), + index: make(map[string]int), + } +} + +// newMappingWithSize creates a new Mapping preallocated to the specified size. +func newMappingWithSize(size int) Mapping { + return Mapping{ + pairs: make([]Pair, 0, size), + index: make(map[string]int, size), + } +} + +// newMappingFromGoMap creates a new Mapping from a Go map of string keys and dynamic values. +func newMappingFromGoMap(vin map[string]Value) Mapping { + m := newMappingWithSize(len(vin)) + for k, v := range vin { + m.Set(V(k), v) + } + return m +} + +// Pairs returns all the key-value pairs in the Mapping. +func (m Mapping) Pairs() []Pair { + return m.pairs +} + +// Len returns the number of key-value pairs in the Mapping. +func (m Mapping) Len() int { + return len(m.pairs) +} + +// GetPair returns the key-value pair with the specified key. +// It also returns a boolean indicating whether the pair was found. +func (m Mapping) GetPair(key Value) (Pair, bool) { + skey, ok := key.AsString() + if !ok { + return Pair{}, false + } + return m.GetPairByString(skey) +} + +// GetPairByString returns the key-value pair with the specified string key. +// It also returns a boolean indicating whether the pair was found. +func (m Mapping) GetPairByString(skey string) (Pair, bool) { + if i, ok := m.index[skey]; ok { + return m.pairs[i], true + } + return Pair{}, false +} + +// Get returns the value associated with the specified key. +// It also returns a boolean indicating whether the value was found. +func (m Mapping) Get(key Value) (Value, bool) { + p, ok := m.GetPair(key) + return p.Value, ok +} + +// GetByString returns the value associated with the specified string key. +// It also returns a boolean indicating whether the value was found. +func (m *Mapping) GetByString(skey string) (Value, bool) { + p, ok := m.GetPairByString(skey) + return p.Value, ok +} + +// Set sets the value for the given key in the mapping. +// If the key already exists, the value is updated. +// If the key does not exist, a new key-value pair is added. +// The key must be a string, otherwise an error is returned. +func (m *Mapping) Set(key Value, value Value) error { + skey, ok := key.AsString() + if !ok { + return fmt.Errorf("key must be a string, got %s", key.Kind()) + } + + // If the key already exists, update the value. + if i, ok := m.index[skey]; ok { + m.pairs[i].Value = value + return nil + } + + // Otherwise, add a new pair. + m.pairs = append(m.pairs, Pair{key, value}) + if m.index == nil { + m.index = make(map[string]int) + } + m.index[skey] = len(m.pairs) - 1 + return nil +} + +// Keys returns all the keys in the Mapping. 
+func (m Mapping) Keys() []Value { + keys := make([]Value, 0, len(m.pairs)) + for _, p := range m.pairs { + keys = append(keys, p.Key) + } + return keys +} + +// Values returns all the values in the Mapping. +func (m Mapping) Values() []Value { + values := make([]Value, 0, len(m.pairs)) + for _, p := range m.pairs { + values = append(values, p.Value) + } + return values +} + +// Clone creates a shallow copy of the Mapping. +func (m Mapping) Clone() Mapping { + return Mapping{ + pairs: slices.Clone(m.pairs), + index: maps.Clone(m.index), + } +} + +// Merge merges the key-value pairs from another Mapping into the current Mapping. +func (m *Mapping) Merge(n Mapping) { + for _, p := range n.pairs { + m.Set(p.Key, p.Value) + } +} diff --git a/libs/dyn/mapping_test.go b/libs/dyn/mapping_test.go new file mode 100644 index 000000000..43b24b0c5 --- /dev/null +++ b/libs/dyn/mapping_test.go @@ -0,0 +1,204 @@ +package dyn_test + +import ( + "fmt" + "testing" + + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" +) + +func TestNewMapping(t *testing.T) { + m := dyn.NewMapping() + assert.Equal(t, 0, m.Len()) +} + +func TestMappingZeroValue(t *testing.T) { + var m dyn.Mapping + assert.Equal(t, 0, m.Len()) + + value, ok := m.Get(dyn.V("key")) + assert.Equal(t, dyn.InvalidValue, value) + assert.False(t, ok) + assert.Len(t, m.Keys(), 0) + assert.Len(t, m.Values(), 0) +} + +func TestMappingGet(t *testing.T) { + var m dyn.Mapping + err := m.Set(dyn.V("key"), dyn.V("value")) + assert.NoError(t, err) + assert.Equal(t, 1, m.Len()) + + // Call GetPair + p, ok := m.GetPair(dyn.V("key")) + assert.True(t, ok) + assert.Equal(t, dyn.V("key"), p.Key) + assert.Equal(t, dyn.V("value"), p.Value) + + // Modify the value to make sure we're not getting a reference + p.Value = dyn.V("newvalue") + + // Call GetPair with invalid key + p, ok = m.GetPair(dyn.V(1234)) + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, p.Key) + assert.Equal(t, dyn.InvalidValue, p.Value) + + // Call GetPair with non-existent key + p, ok = m.GetPair(dyn.V("enoexist")) + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, p.Key) + assert.Equal(t, dyn.InvalidValue, p.Value) + + // Call GetPairByString + p, ok = m.GetPairByString("key") + assert.True(t, ok) + assert.Equal(t, dyn.V("key"), p.Key) + assert.Equal(t, dyn.V("value"), p.Value) + + // Modify the value to make sure we're not getting a reference + p.Value = dyn.V("newvalue") + + // Call GetPairByString with with non-existent key + p, ok = m.GetPairByString("enoexist") + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, p.Key) + assert.Equal(t, dyn.InvalidValue, p.Value) + + // Call Get + value, ok := m.Get(dyn.V("key")) + assert.True(t, ok) + assert.Equal(t, dyn.V("value"), value) + + // Call Get with invalid key + value, ok = m.Get(dyn.V(1234)) + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, value) + + // Call Get with non-existent key + value, ok = m.Get(dyn.V("enoexist")) + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, value) + + // Call GetByString + value, ok = m.GetByString("key") + assert.True(t, ok) + assert.Equal(t, dyn.V("value"), value) + + // Call GetByString with non-existent key + value, ok = m.GetByString("enoexist") + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, value) +} + +func TestMappingSet(t *testing.T) { + var err error + var m dyn.Mapping + + // Set a value + err = m.Set(dyn.V("key1"), dyn.V("foo")) + assert.NoError(t, err) + assert.Equal(t, 1, 
m.Len()) + + // Confirm the value + value, ok := m.GetByString("key1") + assert.True(t, ok) + assert.Equal(t, dyn.V("foo"), value) + + // Set another value + err = m.Set(dyn.V("key2"), dyn.V("bar")) + assert.NoError(t, err) + assert.Equal(t, 2, m.Len()) + + // Confirm the value + value, ok = m.Get(dyn.V("key2")) + assert.True(t, ok) + assert.Equal(t, dyn.V("bar"), value) + + // Overwrite first value + err = m.Set(dyn.V("key1"), dyn.V("qux")) + assert.NoError(t, err) + assert.Equal(t, 2, m.Len()) + + // Confirm the value + value, ok = m.Get(dyn.V("key1")) + assert.True(t, ok) + assert.Equal(t, dyn.V("qux"), value) + + // Try to set non-string key + err = m.Set(dyn.V(1), dyn.V("qux")) + assert.Error(t, err) + assert.Equal(t, 2, m.Len()) +} + +func TestMappingKeysValues(t *testing.T) { + var err error + + // Configure mapping + var m dyn.Mapping + err = m.Set(dyn.V("key1"), dyn.V("foo")) + assert.NoError(t, err) + err = m.Set(dyn.V("key2"), dyn.V("bar")) + assert.NoError(t, err) + + // Confirm keys + keys := m.Keys() + assert.Len(t, keys, 2) + assert.Contains(t, keys, dyn.V("key1")) + assert.Contains(t, keys, dyn.V("key2")) + + // Confirm values + values := m.Values() + assert.Len(t, values, 2) + assert.Contains(t, values, dyn.V("foo")) + assert.Contains(t, values, dyn.V("bar")) +} + +func TestMappingClone(t *testing.T) { + var err error + + // Configure mapping + var m1 dyn.Mapping + err = m1.Set(dyn.V("key1"), dyn.V("foo")) + assert.NoError(t, err) + err = m1.Set(dyn.V("key2"), dyn.V("bar")) + assert.NoError(t, err) + + // Clone mapping + m2 := m1.Clone() + assert.Equal(t, m1.Len(), m2.Len()) + + // Modify original mapping + err = m1.Set(dyn.V("key1"), dyn.V("qux")) + assert.NoError(t, err) + + // Confirm values + value, ok := m1.Get(dyn.V("key1")) + assert.True(t, ok) + assert.Equal(t, dyn.V("qux"), value) + value, ok = m2.Get(dyn.V("key1")) + assert.True(t, ok) + assert.Equal(t, dyn.V("foo"), value) +} + +func TestMappingMerge(t *testing.T) { + var m1 dyn.Mapping + for i := 0; i < 10; i++ { + err := m1.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i)) + require.NoError(t, err) + } + + var m2 dyn.Mapping + for i := 5; i < 15; i++ { + err := m2.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i)) + require.NoError(t, err) + } + + var out dyn.Mapping + out.Merge(m1) + assert.Equal(t, 10, out.Len()) + out.Merge(m2) + assert.Equal(t, 15, out.Len()) +} diff --git a/libs/dyn/merge/elements_by_key_test.go b/libs/dyn/merge/elements_by_key_test.go index c61f834e5..ef316cc66 100644 --- a/libs/dyn/merge/elements_by_key_test.go +++ b/libs/dyn/merge/elements_by_key_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index 1cadbea60..69ccf516a 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -51,27 +51,27 @@ func merge(a, b dyn.Value) (dyn.Value, error) { } func mergeMap(a, b dyn.Value) (dyn.Value, error) { - out := make(map[string]dyn.Value) + out := dyn.NewMapping() am := a.MustMap() bm := b.MustMap() // Add the values from a into the output map. - for k, v := range am { - out[k] = v - } + out.Merge(am) // Merge the values from b into the output map. - for k, v := range bm { - if _, ok := out[k]; ok { + for _, pair := range bm.Pairs() { + pk := pair.Key + pv := pair.Value + if ov, ok := out.Get(pk); ok { // If the key already exists, merge the values. 
- merged, err := merge(out[k], v) + merged, err := merge(ov, pv) if err != nil { return dyn.NilValue, err } - out[k] = merged + out.Set(pk, merged) } else { // Otherwise, just set the value. - out[k] = v + out.Set(pk, pv) } } diff --git a/libs/dyn/merge/merge_test.go b/libs/dyn/merge/merge_test.go index c4928e353..eaaaab16f 100644 --- a/libs/dyn/merge/merge_test.go +++ b/libs/dyn/merge/merge_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestMergeMaps(t *testing.T) { diff --git a/libs/dyn/path_string_test.go b/libs/dyn/path_string_test.go index 9af394c6f..0d64bf110 100644 --- a/libs/dyn/path_string_test.go +++ b/libs/dyn/path_string_test.go @@ -5,7 +5,7 @@ import ( "testing" . "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestNewPathFromString(t *testing.T) { diff --git a/libs/dyn/path_test.go b/libs/dyn/path_test.go index 1152a060a..44df2050b 100644 --- a/libs/dyn/path_test.go +++ b/libs/dyn/path_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestPathAppend(t *testing.T) { diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go index 960a50d5b..a265dad08 100644 --- a/libs/dyn/pattern.go +++ b/libs/dyn/pattern.go @@ -2,7 +2,6 @@ package dyn import ( "fmt" - "maps" "slices" ) @@ -55,10 +54,13 @@ func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitO return InvalidValue, fmt.Errorf("expected a map at %q, found %s", prefix, v.Kind()) } - m = maps.Clone(m) - for key, value := range m { + m = m.Clone() + for _, pair := range m.Pairs() { + pk := pair.Key + pv := pair.Value + var err error - nv, err := visit(value, append(prefix, Key(key)), suffix, opts) + nv, err := visit(pv, append(prefix, Key(pk.MustString())), suffix, opts) if err != nil { // Leave the value intact if the suffix pattern didn't match any value. if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { @@ -66,7 +68,8 @@ func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitO } return InvalidValue, err } - m[key] = nv + + m.Set(pk, nv) } return NewValue(m, v.Location()), nil diff --git a/libs/dyn/pattern_test.go b/libs/dyn/pattern_test.go index 372fe7467..1b54953ef 100644 --- a/libs/dyn/pattern_test.go +++ b/libs/dyn/pattern_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestNewPattern(t *testing.T) { diff --git a/libs/dyn/value.go b/libs/dyn/value.go index ecf21abbe..2e8f1b9af 100644 --- a/libs/dyn/value.go +++ b/libs/dyn/value.go @@ -27,14 +27,16 @@ var NilValue = Value{ // V constructs a new Value with the given value. func V(v any) Value { - return Value{ - v: v, - k: kindOf(v), - } + return NewValue(v, Location{}) } // NewValue constructs a new Value with the given value and location. 
func NewValue(v any, loc Location) Value { + switch vin := v.(type) { + case map[string]Value: + v = newMappingFromGoMap(vin) + } + return Value{ v: v, k: kindOf(v), @@ -72,12 +74,14 @@ func (v Value) AsAny() any { case KindInvalid: panic("invoked AsAny on invalid value") case KindMap: - vv := v.v.(map[string]Value) - m := make(map[string]any, len(vv)) - for k, v := range vv { - m[k] = v.AsAny() + m := v.v.(Mapping) + out := make(map[string]any, m.Len()) + for _, pair := range m.pairs { + pk := pair.Key + pv := pair.Value + out[pk.MustString()] = pv.AsAny() } - return m + return out case KindSequence: vv := v.v.([]Value) a := make([]any, len(vv)) @@ -109,7 +113,7 @@ func (v Value) Get(key string) Value { return NilValue } - vv, ok := m[key] + vv, ok := m.GetByString(key) if !ok { return NilValue } diff --git a/libs/dyn/value_test.go b/libs/dyn/value_test.go index 7c9a9d990..bbdc2c96b 100644 --- a/libs/dyn/value_test.go +++ b/libs/dyn/value_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestInvalidValue(t *testing.T) { @@ -22,14 +22,12 @@ func TestValueIsAnchor(t *testing.T) { func TestValueAsMap(t *testing.T) { var zeroValue dyn.Value - m, ok := zeroValue.AsMap() + _, ok := zeroValue.AsMap() assert.False(t, ok) - assert.Nil(t, m) var intValue = dyn.NewValue(1, dyn.Location{}) - m, ok = intValue.AsMap() + _, ok = intValue.AsMap() assert.False(t, ok) - assert.Nil(t, m) var mapValue = dyn.NewValue( map[string]dyn.Value{ @@ -37,9 +35,9 @@ func TestValueAsMap(t *testing.T) { }, dyn.Location{File: "file", Line: 1, Column: 2}, ) - m, ok = mapValue.AsMap() + m, ok := mapValue.AsMap() assert.True(t, ok) - assert.Len(t, m, 1) + assert.Equal(t, 1, m.Len()) } func TestValueIsValid(t *testing.T) { diff --git a/libs/dyn/value_underlying.go b/libs/dyn/value_underlying.go index c8c503790..2f0f26a1f 100644 --- a/libs/dyn/value_underlying.go +++ b/libs/dyn/value_underlying.go @@ -5,16 +5,16 @@ import ( "time" ) -// AsMap returns the underlying map if this value is a map, +// AsMap returns the underlying mapping if this value is a map, // the zero value and false otherwise. -func (v Value) AsMap() (map[string]Value, bool) { - vv, ok := v.v.(map[string]Value) +func (v Value) AsMap() (Mapping, bool) { + vv, ok := v.v.(Mapping) return vv, ok } -// MustMap returns the underlying map if this value is a map, +// MustMap returns the underlying mapping if this value is a map, // panics otherwise. -func (v Value) MustMap() map[string]Value { +func (v Value) MustMap() Mapping { vv, ok := v.AsMap() if !ok || v.k != KindMap { panic(fmt.Sprintf("expected kind %s, got %s", KindMap, v.k)) diff --git a/libs/dyn/value_underlying_test.go b/libs/dyn/value_underlying_test.go index 17cb95941..9878cfaf9 100644 --- a/libs/dyn/value_underlying_test.go +++ b/libs/dyn/value_underlying_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestValueUnderlyingMap(t *testing.T) { diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index 376dcc22d..3fe356194 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -3,7 +3,6 @@ package dyn import ( "errors" "fmt" - "maps" "slices" ) @@ -77,7 +76,7 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts } // Lookup current value in the map. 
- ev, ok := m[component.key] + ev, ok := m.GetByString(component.key) if !ok { return InvalidValue, noSuchKeyError{path} } @@ -94,8 +93,8 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts } // Return an updated map value. - m = maps.Clone(m) - m[component.key] = nv + m = m.Clone() + m.Set(V(component.key), nv) return Value{ v: m, k: KindMap, diff --git a/libs/dyn/visit_get_test.go b/libs/dyn/visit_get_test.go index 22dce0858..adc307794 100644 --- a/libs/dyn/visit_get_test.go +++ b/libs/dyn/visit_get_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestGetWithEmptyPath(t *testing.T) { diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index 18fc668ed..f5cfea311 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -2,7 +2,6 @@ package dyn import ( "fmt" - "maps" "slices" ) @@ -15,13 +14,15 @@ func Foreach(fn MapFunc) MapFunc { return func(p Path, v Value) (Value, error) { switch v.Kind() { case KindMap: - m := maps.Clone(v.MustMap()) - for key, value := range m { - var err error - m[key], err = fn(append(p, Key(key)), value) + m := v.MustMap().Clone() + for _, pair := range m.Pairs() { + pk := pair.Key + pv := pair.Value + nv, err := fn(append(p, Key(pk.MustString())), pv) if err != nil { return InvalidValue, err } + m.Set(pk, nv) } return NewValue(m, v.Location()), nil case KindSequence: diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index f87f0a40d..df6bad496 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index edcd9bb73..b086fb8a9 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -2,7 +2,6 @@ package dyn import ( "fmt" - "maps" "slices" ) @@ -41,8 +40,8 @@ func SetByPath(v Value, p Path, nv Value) (Value, error) { } // Return an updated map value. 
- m = maps.Clone(m) - m[component.key] = nv + m = m.Clone() + m.Set(V(component.key), nv) return Value{ v: m, k: KindMap, diff --git a/libs/dyn/visit_set_test.go b/libs/dyn/visit_set_test.go index b38471587..df58941e1 100644 --- a/libs/dyn/visit_set_test.go +++ b/libs/dyn/visit_set_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestSetWithEmptyPath(t *testing.T) { diff --git a/libs/dyn/walk.go b/libs/dyn/walk.go index 26ddfc11d..97b99b061 100644 --- a/libs/dyn/walk.go +++ b/libs/dyn/walk.go @@ -34,16 +34,18 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro switch v.Kind() { case KindMap: m := v.MustMap() - out := make(map[string]Value, len(m)) - for k := range m { - nv, err := walk(m[k], append(p, Key(k)), fn) + out := newMappingWithSize(m.Len()) + for _, pair := range m.Pairs() { + pk := pair.Key + pv := pair.Value + nv, err := walk(pv, append(p, Key(pk.MustString())), fn) if err == ErrDrop { continue } if err != nil { return NilValue, err } - out[k] = nv + out.Set(pk, nv) } v.v = out case KindSequence: diff --git a/libs/dyn/walk_test.go b/libs/dyn/walk_test.go index 1b94ad902..d62b9a4db 100644 --- a/libs/dyn/walk_test.go +++ b/libs/dyn/walk_test.go @@ -5,7 +5,7 @@ import ( "testing" . "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index 899e1d7b8..908793d58 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -92,7 +92,7 @@ func (d *loader) loadSequence(node *yaml.Node, loc dyn.Location) (dyn.Value, err func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { var merge *yaml.Node - acc := make(map[string]dyn.Value) + acc := dyn.NewMapping() for i := 0; i < len(node.Content); i += 2 { key := node.Content[i] val := node.Content[i+1] @@ -116,12 +116,17 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro return dyn.NilValue, errorf(loc, "invalid key tag: %v", st) } + k, err := d.load(key) + if err != nil { + return dyn.NilValue, err + } + v, err := d.load(val) if err != nil { return dyn.NilValue, err } - acc[key.Value] = v + acc.Set(k, v) } if merge == nil { @@ -146,7 +151,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro // Build a sequence of values to merge. // The entries that we already accumulated have precedence. - var seq []map[string]dyn.Value + var seq []dyn.Mapping for _, n := range mnodes { v, err := d.load(n) if err != nil { @@ -161,11 +166,9 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro // Append the accumulated entries to the sequence. 
seq = append(seq, acc) - out := make(map[string]dyn.Value) + out := dyn.NewMapping() for _, m := range seq { - for k, v := range m { - out[k] = v - } + out.Merge(m) } return dyn.NewValue(out, loc), nil diff --git a/libs/dyn/yamlloader/yaml_anchor_test.go b/libs/dyn/yamlloader/yaml_anchor_test.go index 05beb5401..29ce69f0a 100644 --- a/libs/dyn/yamlloader/yaml_anchor_test.go +++ b/libs/dyn/yamlloader/yaml_anchor_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestYAMLAnchor01(t *testing.T) { diff --git a/libs/dyn/yamlloader/yaml_error_test.go b/libs/dyn/yamlloader/yaml_error_test.go index 11c444ad3..0ae424341 100644 --- a/libs/dyn/yamlloader/yaml_error_test.go +++ b/libs/dyn/yamlloader/yaml_error_test.go @@ -5,8 +5,8 @@ import ( "os" "testing" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/databricks/cli/libs/dyn/yamlloader" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) diff --git a/libs/dyn/yamlloader/yaml_mix_test.go b/libs/dyn/yamlloader/yaml_mix_test.go index 307b93dbf..55ded6baf 100644 --- a/libs/dyn/yamlloader/yaml_mix_test.go +++ b/libs/dyn/yamlloader/yaml_mix_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestYAMLMix01(t *testing.T) { diff --git a/libs/dyn/yamlloader/yaml_test.go b/libs/dyn/yamlloader/yaml_test.go index 14269feee..9bb0377dd 100644 --- a/libs/dyn/yamlloader/yaml_test.go +++ b/libs/dyn/yamlloader/yaml_test.go @@ -6,8 +6,8 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/databricks/cli/libs/dyn/yamlloader" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) diff --git a/libs/dyn/yamlsaver/order_test.go b/libs/dyn/yamlsaver/order_test.go index ed2877f6c..ee9dc4752 100644 --- a/libs/dyn/yamlsaver/order_test.go +++ b/libs/dyn/yamlsaver/order_test.go @@ -3,7 +3,7 @@ package yamlsaver import ( "testing" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestOrderReturnsIncreasingIndex(t *testing.T) { diff --git a/libs/dyn/yamlsaver/saver.go b/libs/dyn/yamlsaver/saver.go index 84483a12f..fe4cfb854 100644 --- a/libs/dyn/yamlsaver/saver.go +++ b/libs/dyn/yamlsaver/saver.go @@ -9,7 +9,6 @@ import ( "strconv" "github.com/databricks/cli/libs/dyn" - "golang.org/x/exp/maps" "gopkg.in/yaml.v3" ) @@ -75,25 +74,27 @@ func (s *saver) toYamlNodeWithStyle(v dyn.Value, style yaml.Style) (*yaml.Node, switch v.Kind() { case dyn.KindMap: m, _ := v.AsMap() - keys := maps.Keys(m) + // We're using location lines to define the order of keys in YAML. 
// The location is set when we convert API response struct to config.Value representation // See convert.convertMap for details - sort.SliceStable(keys, func(i, j int) bool { - return m[keys[i]].Location().Line < m[keys[j]].Location().Line + pairs := m.Pairs() + sort.SliceStable(pairs, func(i, j int) bool { + return pairs[i].Value.Location().Line < pairs[j].Value.Location().Line }) content := make([]*yaml.Node, 0) - for _, k := range keys { - item := m[k] - node := yaml.Node{Kind: yaml.ScalarNode, Value: k, Style: style} + for _, pair := range pairs { + pk := pair.Key + pv := pair.Value + node := yaml.Node{Kind: yaml.ScalarNode, Value: pk.MustString(), Style: style} var nestedNodeStyle yaml.Style - if customStyle, ok := s.hasStyle(k); ok { + if customStyle, ok := s.hasStyle(pk.MustString()); ok { nestedNodeStyle = customStyle } else { nestedNodeStyle = style } - c, err := s.toYamlNodeWithStyle(item, nestedNodeStyle) + c, err := s.toYamlNodeWithStyle(pv, nestedNodeStyle) if err != nil { return nil, err } diff --git a/libs/dyn/yamlsaver/saver_test.go b/libs/dyn/yamlsaver/saver_test.go index ec44a4298..bdf1891cd 100644 --- a/libs/dyn/yamlsaver/saver_test.go +++ b/libs/dyn/yamlsaver/saver_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "gopkg.in/yaml.v3" ) diff --git a/libs/dyn/yamlsaver/utils.go b/libs/dyn/yamlsaver/utils.go index 0fb4064b5..6149491d6 100644 --- a/libs/dyn/yamlsaver/utils.go +++ b/libs/dyn/yamlsaver/utils.go @@ -26,7 +26,9 @@ func ConvertToMapValue(strct any, order *Order, skipFields []string, dst map[str } func skipAndOrder(mv dyn.Value, order *Order, skipFields []string, dst map[string]dyn.Value) (dyn.Value, error) { - for k, v := range mv.MustMap() { + for _, pair := range mv.MustMap().Pairs() { + k := pair.Key.MustString() + v := pair.Value if v.Kind() == dyn.KindNil { continue } diff --git a/libs/dyn/yamlsaver/utils_test.go b/libs/dyn/yamlsaver/utils_test.go index 32c9143be..04b4c404f 100644 --- a/libs/dyn/yamlsaver/utils_test.go +++ b/libs/dyn/yamlsaver/utils_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestConvertToMapValueWithOrder(t *testing.T) { @@ -32,7 +32,7 @@ func TestConvertToMapValueWithOrder(t *testing.T) { result, err := ConvertToMapValue(v, NewOrder([]string{"list", "name", "map"}), []string{"format"}, map[string]dyn.Value{}) assert.NoError(t, err) - assert.Equal(t, map[string]dyn.Value{ + assert.Equal(t, dyn.V(map[string]dyn.Value{ "list": dyn.NewValue([]dyn.Value{ dyn.V("a"), dyn.V("b"), @@ -44,5 +44,5 @@ func TestConvertToMapValueWithOrder(t *testing.T) { "key2": dyn.V("value2"), }, dyn.Location{Line: -1}), "long_name_field": dyn.NewValue("long name goes here", dyn.Location{Line: 1}), - }, result.MustMap()) + }), result) } From 9cf3dbe686302708f8d1afa4fc1f3cd89e2c49e3 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 25 Mar 2024 12:32:45 +0100 Subject: [PATCH 095/286] Use UserName field to identify if service principal is used (#1310) ## Changes Use UserName field to identify if service principal is used ## Tests Integration test passed --- internal/init_test.go | 2 +- libs/auth/service_principal.go | 8 ++++---- libs/template/helpers.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/init_test.go b/internal/init_test.go index bed1119f8..c3cb0127e 100644 --- 
a/internal/init_test.go +++ b/internal/init_test.go @@ -138,7 +138,7 @@ func TestAccBundleInitHelpers(t *testing.T) { }, { funcName: "{{is_service_principal}}", - expected: strconv.FormatBool(auth.IsServicePrincipal(me.Id)), + expected: strconv.FormatBool(auth.IsServicePrincipal(me.UserName)), }, { funcName: "{{smallest_node_type}}", diff --git a/libs/auth/service_principal.go b/libs/auth/service_principal.go index cb488d16e..5f1854e3a 100644 --- a/libs/auth/service_principal.go +++ b/libs/auth/service_principal.go @@ -4,12 +4,12 @@ import ( "github.com/google/uuid" ) -// Determines whether a given user id is a service principal. -// This function uses a heuristic: if the user id is a UUID, then we assume +// Determines whether a given user name is a service principal. +// This function uses a heuristic: if the user name is a UUID, then we assume // it's a service principal. Unfortunately, the service principal listing API is too // slow for our purposes. And the "users" and "service principals get" APIs // only allow access by workspace admins. -func IsServicePrincipal(userId string) bool { - _, err := uuid.Parse(userId) +func IsServicePrincipal(userName string) bool { + _, err := uuid.Parse(userName) return err == nil } diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 56710dfbd..d15a801d6 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -140,7 +140,7 @@ func loadHelpers(ctx context.Context) template.FuncMap { return false, err } } - result := auth.IsServicePrincipal(cachedUser.Id) + result := auth.IsServicePrincipal(cachedUser.UserName) cachedIsServicePrincipal = &result return result, nil }, From ed194668dbc58a0b1fa64a3609c3dbdf498b0c06 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 25 Mar 2024 15:18:47 +0100 Subject: [PATCH 096/286] Return `diag.Diagnostics` from mutators (#1305) ## Changes This diagnostics type allows us to capture multiple warnings as well as errors in the return value. This is a preparation for returning additional warnings from mutators in case we detect non-fatal problems. * All return statements that previously returned an error now return `diag.FromErr` * All return statements that previously returned `fmt.Errorf` now return `diag.Errorf` * All `err != nil` checks now use `diags.HasError()` or `diags.Error()` ## Tests * Existing tests pass. * I confirmed no call site under `./bundle` or `./cmd/bundle` uses `errors.Is` on the return value from mutators. This is relevant because we cannot wrap errors with `%w` when calling `diag.Errorf` (like `fmt.Errorf`; context in https://github.com/golang/go/issues/47641). 
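For illustration, a minimal sketch of the new mutator shape after this change. The `verifyName` mutator below is hypothetical (it is not part of this patch); `diag.FromErr`, `diag.Errorf`, `HasError`, and `Error` are the helpers referenced above, and the caller-side use of `bundle.Apply` assumes its signature was updated together with `bundle/mutator.go`.

```go
package example

import (
	"context"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

// verifyName is a hypothetical mutator used only to show the new signature.
type verifyName struct {
	expected string
}

func (m *verifyName) Name() string {
	return "example.VerifyName"
}

// Previously: Apply(ctx context.Context, b *bundle.Bundle) error.
func (m *verifyName) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	// Previously: return fmt.Errorf("expected name must not be empty").
	// Note: diag.Errorf does not support wrapping with %w.
	if m.expected == "" {
		return diag.Errorf("expected name must not be empty")
	}

	// Previously: return err.
	if err := ctx.Err(); err != nil {
		return diag.FromErr(err)
	}

	// A nil return means no errors and no warnings.
	return nil
}

// Callers switch from `if err != nil` to the HasError/Error accessors.
func run(ctx context.Context, b *bundle.Bundle) error {
	diags := bundle.Apply(ctx, b, &verifyName{expected: "dev"})
	if diags.HasError() {
		return diags.Error()
	}
	return nil
}
```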
--- bundle/artifacts/all.go | 5 +- bundle/artifacts/artifacts.go | 19 ++++---- bundle/artifacts/autodetect.go | 3 +- bundle/artifacts/build.go | 7 +-- bundle/artifacts/infer.go | 5 +- bundle/artifacts/upload.go | 17 +++---- bundle/artifacts/upload_test.go | 11 +++-- bundle/artifacts/whl/autodetect.go | 5 +- bundle/artifacts/whl/build.go | 9 ++-- bundle/artifacts/whl/from_libraries.go | 3 +- bundle/artifacts/whl/infer.go | 5 +- bundle/config/mutator/default_target.go | 3 +- bundle/config/mutator/default_target_test.go | 10 ++-- .../config/mutator/default_workspace_paths.go | 6 +-- .../mutator/default_workspace_paths_test.go | 8 ++-- .../config/mutator/default_workspace_root.go | 7 +-- .../mutator/default_workspace_root_test.go | 5 +- bundle/config/mutator/environments_compat.go | 7 ++- .../mutator/environments_compat_test.go | 13 ++--- .../mutator/expand_pipeline_glob_paths.go | 7 ++- .../expand_pipeline_glob_paths_test.go | 4 +- .../config/mutator/expand_workspace_root.go | 7 +-- .../mutator/expand_workspace_root_test.go | 16 +++---- bundle/config/mutator/if.go | 3 +- bundle/config/mutator/initialize_variables.go | 3 +- .../mutator/initialize_variables_test.go | 8 ++-- .../mutator/initialize_workspace_client.go | 5 +- bundle/config/mutator/load_git_details.go | 9 ++-- bundle/config/mutator/merge_job_clusters.go | 7 ++- .../config/mutator/merge_job_clusters_test.go | 8 ++-- bundle/config/mutator/merge_job_tasks.go | 7 ++- bundle/config/mutator/merge_job_tasks_test.go | 8 ++-- .../config/mutator/merge_pipeline_clusters.go | 7 ++- .../mutator/merge_pipeline_clusters_test.go | 16 +++---- bundle/config/mutator/noop.go | 3 +- bundle/config/mutator/override_compute.go | 6 +-- .../config/mutator/override_compute_test.go | 20 ++++---- .../config/mutator/populate_current_user.go | 5 +- bundle/config/mutator/process_include.go | 9 ++-- bundle/config/mutator/process_include_test.go | 4 +- .../config/mutator/process_root_includes.go | 14 +++--- .../mutator/process_root_includes_test.go | 38 +++++++-------- bundle/config/mutator/process_target_mode.go | 24 +++++----- .../mutator/process_target_mode_test.go | 44 ++++++++--------- .../mutator/resolve_resource_references.go | 5 +- .../resolve_resource_references_test.go | 16 +++---- .../mutator/resolve_variable_references.go | 7 ++- .../resolve_variable_references_test.go | 37 +++++++------- bundle/config/mutator/rewrite_sync_paths.go | 7 ++- .../config/mutator/rewrite_sync_paths_test.go | 16 +++---- bundle/config/mutator/run_as.go | 3 +- .../config/mutator/select_default_target.go | 10 ++-- .../mutator/select_default_target_test.go | 24 +++++----- bundle/config/mutator/select_target.go | 9 ++-- bundle/config/mutator/select_target_test.go | 8 ++-- bundle/config/mutator/set_variables.go | 21 ++++---- bundle/config/mutator/set_variables_test.go | 24 +++++----- bundle/config/mutator/trampoline.go | 5 +- bundle/config/mutator/trampoline_test.go | 4 +- bundle/config/mutator/translate_paths.go | 7 ++- bundle/config/mutator/translate_paths_test.go | 48 +++++++++---------- bundle/config/mutator/validate_git_details.go | 6 +-- .../mutator/validate_git_details_test.go | 14 +++--- bundle/deferred.go | 15 +++--- bundle/deferred_test.go | 32 ++++++++----- bundle/deploy/check_running_resources.go | 11 +++-- bundle/deploy/files/delete.go | 11 +++-- bundle/deploy/files/upload.go | 7 +-- bundle/deploy/lock/acquire.go | 10 ++-- bundle/deploy/lock/release.go | 12 ++--- bundle/deploy/metadata/annotate_jobs.go | 3 +- bundle/deploy/metadata/annotate_jobs_test.go | 9 ++-- 
bundle/deploy/metadata/compute.go | 6 +-- bundle/deploy/metadata/compute_test.go | 4 +- bundle/deploy/metadata/upload.go | 9 ++-- bundle/deploy/state_pull.go | 23 ++++----- bundle/deploy/state_pull_test.go | 14 +++--- bundle/deploy/state_push.go | 11 +++-- bundle/deploy/state_push_test.go | 4 +- bundle/deploy/state_update.go | 19 ++++---- bundle/deploy/state_update_test.go | 12 ++--- bundle/deploy/terraform/apply.go | 10 ++-- bundle/deploy/terraform/destroy.go | 15 +++--- bundle/deploy/terraform/import.go | 25 +++++----- bundle/deploy/terraform/init.go | 19 ++++---- bundle/deploy/terraform/init_test.go | 4 +- bundle/deploy/terraform/interpolate.go | 7 ++- bundle/deploy/terraform/interpolate_test.go | 8 ++-- bundle/deploy/terraform/load.go | 13 ++--- bundle/deploy/terraform/load_test.go | 4 +- bundle/deploy/terraform/plan.go | 11 +++-- bundle/deploy/terraform/state_pull.go | 13 ++--- bundle/deploy/terraform/state_pull_test.go | 25 +++++----- bundle/deploy/terraform/state_push.go | 11 +++-- bundle/deploy/terraform/state_push_test.go | 4 +- bundle/deploy/terraform/unbind.go | 9 ++-- bundle/deploy/terraform/write.go | 11 +++-- bundle/libraries/match.go | 8 ++-- bundle/log_string.go | 3 +- bundle/mutator.go | 25 ++++++---- bundle/mutator_test.go | 7 +-- bundle/permissions/filter.go | 7 ++- bundle/permissions/filter_test.go | 12 ++--- bundle/permissions/mutator.go | 5 +- bundle/permissions/mutator_test.go | 8 ++-- bundle/permissions/workspace_root.go | 5 +- bundle/permissions/workspace_root_test.go | 4 +- bundle/phases/phase.go | 3 +- bundle/python/conditional_transform_test.go | 8 ++-- bundle/python/transform_test.go | 4 +- bundle/python/warning.go | 6 +-- bundle/python/warning_test.go | 8 ++-- bundle/scripts/scripts.go | 9 ++-- bundle/scripts/scripts_test.go | 4 +- bundle/seq.go | 17 ++++--- bundle/seq_test.go | 20 ++++---- bundle/tests/bundle_permissions_test.go | 10 ++-- bundle/tests/conflicting_resource_ids_test.go | 8 ++-- bundle/tests/git_test.go | 4 +- bundle/tests/include_test.go | 6 +-- bundle/tests/interpolation_test.go | 9 ++-- bundle/tests/loader.go | 12 ++--- bundle/tests/path_translation_test.go | 16 +++---- bundle/tests/pipeline_glob_paths_test.go | 8 ++-- bundle/tests/python_wheel_test.go | 32 ++++++------- .../tests/relative_path_with_includes_test.go | 4 +- bundle/tests/run_as_test.go | 13 ++--- bundle/tests/variables_test.go | 36 +++++++------- cmd/bundle/deploy.go | 9 +++- cmd/bundle/deployment/bind.go | 7 +-- cmd/bundle/deployment/unbind.go | 9 +++- cmd/bundle/destroy.go | 9 +++- cmd/bundle/run.go | 4 +- cmd/bundle/summary.go | 12 ++--- cmd/bundle/sync.go | 4 +- cmd/bundle/utils/utils.go | 7 ++- cmd/bundle/validate.go | 4 +- cmd/root/bundle.go | 13 ++--- internal/bundle/artifacts_test.go | 4 +- libs/diag/diagnostic.go | 23 +++++++++ libs/template/renderer_test.go | 11 +++-- 141 files changed, 841 insertions(+), 698 deletions(-) diff --git a/bundle/artifacts/all.go b/bundle/artifacts/all.go index 1a1661e5f..305193e2e 100644 --- a/bundle/artifacts/all.go +++ b/bundle/artifacts/all.go @@ -7,6 +7,7 @@ import ( "slices" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "golang.org/x/exp/maps" ) @@ -21,7 +22,7 @@ func (m *all) Name() string { return fmt.Sprintf("artifacts.%sAll", m.name) } -func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *all) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { var out []bundle.Mutator // Iterate with stable ordering. 
@@ -31,7 +32,7 @@ func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error { for _, name := range keys { m, err := m.fn(name) if err != nil { - return err + return diag.FromErr(err) } if m != nil { out = append(out, m) diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index ce2e165b7..b7a22d09d 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) @@ -57,17 +58,17 @@ func (m *basicBuild) Name() string { return fmt.Sprintf("artifacts.Build(%s)", m.name) } -func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name)) out, err := artifact.Build(ctx) if err != nil { - return fmt.Errorf("build for %s failed, error: %w, output: %s", m.name, err, out) + return diag.Errorf("build for %s failed, error: %v, output: %s", m.name, err, out) } log.Infof(ctx, "Build succeeded") @@ -87,29 +88,29 @@ func (m *basicUpload) Name() string { return fmt.Sprintf("artifacts.Upload(%s)", m.name) } -func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } if len(artifact.Files) == 0 { - return fmt.Errorf("artifact source is not configured: %s", m.name) + return diag.Errorf("artifact source is not configured: %s", m.name) } uploadPath, err := getUploadBasePath(b) if err != nil { - return err + return diag.FromErr(err) } client, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath) if err != nil { - return err + return diag.FromErr(err) } err = uploadArtifact(ctx, b, artifact, uploadPath, client) if err != nil { - return fmt.Errorf("upload for %s failed, error: %w", m.name, err) + return diag.Errorf("upload for %s failed, error: %v", m.name, err) } return nil diff --git a/bundle/artifacts/autodetect.go b/bundle/artifacts/autodetect.go index 6e80ef0b6..0e94edd82 100644 --- a/bundle/artifacts/autodetect.go +++ b/bundle/artifacts/autodetect.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -19,7 +20,7 @@ func (m *autodetect) Name() string { return "artifacts.DetectPackages" } -func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // If artifacts section explicitly defined, do not try to auto detect packages if b.Config.Artifacts != nil { log.Debugf(ctx, "artifacts block is defined, skipping auto-detecting") diff --git a/bundle/artifacts/build.go b/bundle/artifacts/build.go index a78958e60..f3ee097c2 100644 --- a/bundle/artifacts/build.go +++ b/bundle/artifacts/build.go @@ -6,6 +6,7 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" 
+ "github.com/databricks/cli/libs/diag" ) func BuildAll() bundle.Mutator { @@ -27,10 +28,10 @@ func (m *build) Name() string { return fmt.Sprintf("artifacts.Build(%s)", m.name) } -func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } // Skip building if build command is not specified or infered @@ -38,7 +39,7 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { // If no build command was specified or infered and there is no // artifact output files specified, artifact is misconfigured if len(artifact.Files) == 0 { - return fmt.Errorf("misconfigured artifact: please specify 'build' or 'files' property") + return diag.Errorf("misconfigured artifact: please specify 'build' or 'files' property") } return nil } diff --git a/bundle/artifacts/infer.go b/bundle/artifacts/infer.go index ade5def51..abc509107 100644 --- a/bundle/artifacts/infer.go +++ b/bundle/artifacts/infer.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" ) var inferMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{ @@ -41,10 +42,10 @@ func (m *infer) Name() string { return fmt.Sprintf("artifacts.Infer(%s)", m.name) } -func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } // only try to infer command if it's not already defined diff --git a/bundle/artifacts/upload.go b/bundle/artifacts/upload.go index 61e652086..e2c2fc1c9 100644 --- a/bundle/artifacts/upload.go +++ b/bundle/artifacts/upload.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -33,14 +34,14 @@ func (m *upload) Name() string { return fmt.Sprintf("artifacts.Upload(%s)", m.name) } -func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } if len(artifact.Files) == 0 { - return fmt.Errorf("artifact source is not configured: %s", m.name) + return diag.Errorf("artifact source is not configured: %s", m.name) } // Check if source paths are absolute, if not, make them absolute @@ -57,11 +58,11 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { for _, f := range artifact.Files { matches, err := filepath.Glob(f.Source) if err != nil { - return fmt.Errorf("unable to find files for %s: %w", f.Source, err) + return diag.Errorf("unable to find files for %s: %v", f.Source, err) } if len(matches) == 0 { - return fmt.Errorf("no files found for %s", f.Source) + return diag.Errorf("no files found for %s", f.Source) } for _, match := range matches { @@ -81,10 +82,10 @@ func (m *cleanUp) Name() string { return "artifacts.CleanUp" 
} -func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { uploadPath, err := getUploadBasePath(b) if err != nil { - return err + return diag.FromErr(err) } b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{ @@ -94,7 +95,7 @@ func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) error { err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, uploadPath) if err != nil { - return fmt.Errorf("unable to create directory for %s: %w", uploadPath, err) + return diag.Errorf("unable to create directory for %s: %v", uploadPath, err) } return nil diff --git a/bundle/artifacts/upload_test.go b/bundle/artifacts/upload_test.go index 6dea1c145..ec7110095 100644 --- a/bundle/artifacts/upload_test.go +++ b/bundle/artifacts/upload_test.go @@ -9,13 +9,14 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/testfile" "github.com/stretchr/testify/require" ) type noop struct{} -func (n *noop) Apply(context.Context, *bundle.Bundle) error { +func (n *noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics { return nil } @@ -57,8 +58,8 @@ func TestExpandGlobFilesSource(t *testing.T) { return &noop{} } - err = bundle.Apply(context.Background(), b, u) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, u) + require.NoError(t, diags.Error()) require.Equal(t, 2, len(b.Config.Artifacts["test"].Files)) require.Equal(t, filepath.Join(rootPath, "test", "myjar1.jar"), b.Config.Artifacts["test"].Files[0].Source) @@ -93,6 +94,6 @@ func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) { return &noop{} } - err = bundle.Apply(context.Background(), b, u) - require.ErrorContains(t, err, "no files found for") + diags := bundle.Apply(context.Background(), b, u) + require.ErrorContains(t, diags.Error(), "no files found for") } diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index c858a38c0..d11db8311 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -25,7 +26,7 @@ func (m *detectPkg) Name() string { return "artifacts.whl.AutoDetect" } -func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b) if len(wheelTasks) == 0 { log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect") @@ -50,7 +51,7 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error { pkgPath, err := filepath.Abs(b.Config.Path) if err != nil { - return err + return diag.FromErr(err) } b.Config.Artifacts[module] = &config.Artifact{ Path: pkgPath, diff --git a/bundle/artifacts/whl/build.go b/bundle/artifacts/whl/build.go index aeec31a63..992ade297 100644 --- a/bundle/artifacts/whl/build.go +++ b/bundle/artifacts/whl/build.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" 
"github.com/databricks/cli/libs/python" ) @@ -27,10 +28,10 @@ func (m *build) Name() string { return fmt.Sprintf("artifacts.whl.Build(%s)", m.name) } -func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name)) @@ -43,13 +44,13 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { out, err := artifact.Build(ctx) if err != nil { - return fmt.Errorf("build failed %s, error: %w, output: %s", m.name, err, out) + return diag.Errorf("build failed %s, error: %v, output: %s", m.name, err, out) } log.Infof(ctx, "Build succeeded") wheels := python.FindFilesWithSuffixInPath(distPath, ".whl") if len(wheels) == 0 { - return fmt.Errorf("cannot find built wheel in %s for package %s", dir, m.name) + return diag.Errorf("cannot find built wheel in %s for package %s", dir, m.name) } for _, wheel := range wheels { artifact.Files = append(artifact.Files, config.ArtifactFile{ diff --git a/bundle/artifacts/whl/from_libraries.go b/bundle/artifacts/whl/from_libraries.go index 9d35f6314..a2045aaf8 100644 --- a/bundle/artifacts/whl/from_libraries.go +++ b/bundle/artifacts/whl/from_libraries.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -20,7 +21,7 @@ func (m *fromLibraries) Name() string { return "artifacts.whl.DefineArtifactsFromLibraries" } -func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error { +func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if len(b.Config.Artifacts) != 0 { log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined") return nil diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index dc2b8e233..dd4ad2956 100644 --- a/bundle/artifacts/whl/infer.go +++ b/bundle/artifacts/whl/infer.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/python" ) @@ -12,11 +13,11 @@ type infer struct { name string } -func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact := b.Config.Artifacts[m.name] py, err := python.DetectExecutable(ctx) if err != nil { - return err + return diag.FromErr(err) } // Note: using --build-number (build tag) flag does not help with re-installing diff --git a/bundle/config/mutator/default_target.go b/bundle/config/mutator/default_target.go index d5318a3e2..73d99002a 100644 --- a/bundle/config/mutator/default_target.go +++ b/bundle/config/mutator/default_target.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" ) type defineDefaultTarget struct { @@ -24,7 +25,7 @@ func (m *defineDefaultTarget) Name() string { return fmt.Sprintf("DefineDefaultTarget(%s)", m.name) } -func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { // Nothing to do if the 
configuration has at least 1 target. if len(b.Config.Targets) > 0 { return nil diff --git a/bundle/config/mutator/default_target_test.go b/bundle/config/mutator/default_target_test.go index 61a5a0138..d60b14aad 100644 --- a/bundle/config/mutator/default_target_test.go +++ b/bundle/config/mutator/default_target_test.go @@ -13,8 +13,9 @@ import ( func TestDefaultTarget(t *testing.T) { b := &bundle.Bundle{} - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) + require.NoError(t, diags.Error()) + env, ok := b.Config.Targets["default"] assert.True(t, ok) assert.Equal(t, &config.Target{}, env) @@ -28,8 +29,9 @@ func TestDefaultTargetAlreadySpecified(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) + require.NoError(t, diags.Error()) + _, ok := b.Config.Targets["default"] assert.False(t, ok) } diff --git a/bundle/config/mutator/default_workspace_paths.go b/bundle/config/mutator/default_workspace_paths.go index 04f2b0dc0..71e562b51 100644 --- a/bundle/config/mutator/default_workspace_paths.go +++ b/bundle/config/mutator/default_workspace_paths.go @@ -2,10 +2,10 @@ package mutator import ( "context" - "fmt" "path" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type defineDefaultWorkspacePaths struct{} @@ -19,10 +19,10 @@ func (m *defineDefaultWorkspacePaths) Name() string { return "DefaultWorkspacePaths" } -func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { root := b.Config.Workspace.RootPath if root == "" { - return fmt.Errorf("unable to define default workspace paths: workspace root not defined") + return diag.Errorf("unable to define default workspace paths: workspace root not defined") } if b.Config.Workspace.FilePath == "" { diff --git a/bundle/config/mutator/default_workspace_paths_test.go b/bundle/config/mutator/default_workspace_paths_test.go index 1ad0ca786..0ba20ea2b 100644 --- a/bundle/config/mutator/default_workspace_paths_test.go +++ b/bundle/config/mutator/default_workspace_paths_test.go @@ -19,8 +19,8 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) + require.NoError(t, diags.Error()) assert.Equal(t, "/files", b.Config.Workspace.FilePath) assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath) assert.Equal(t, "/state", b.Config.Workspace.StatePath) @@ -37,8 +37,8 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) + require.NoError(t, diags.Error()) assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath) assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath) assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath) diff --git a/bundle/config/mutator/default_workspace_root.go b/bundle/config/mutator/default_workspace_root.go index 260a59584..d7c24a5b5 100644 --- 
a/bundle/config/mutator/default_workspace_root.go +++ b/bundle/config/mutator/default_workspace_root.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type defineDefaultWorkspaceRoot struct{} @@ -18,17 +19,17 @@ func (m *defineDefaultWorkspaceRoot) Name() string { return "DefineDefaultWorkspaceRoot" } -func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Workspace.RootPath != "" { return nil } if b.Config.Bundle.Name == "" { - return fmt.Errorf("unable to define default workspace root: bundle name not defined") + return diag.Errorf("unable to define default workspace root: bundle name not defined") } if b.Config.Bundle.Target == "" { - return fmt.Errorf("unable to define default workspace root: bundle target not selected") + return diag.Errorf("unable to define default workspace root: bundle target not selected") } b.Config.Workspace.RootPath = fmt.Sprintf( diff --git a/bundle/config/mutator/default_workspace_root_test.go b/bundle/config/mutator/default_workspace_root_test.go index 9dd549a39..b05520f62 100644 --- a/bundle/config/mutator/default_workspace_root_test.go +++ b/bundle/config/mutator/default_workspace_root_test.go @@ -20,7 +20,8 @@ func TestDefaultWorkspaceRoot(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot()) + require.NoError(t, diags.Error()) + assert.Equal(t, "~/.bundle/name/environment", b.Config.Workspace.RootPath) } diff --git a/bundle/config/mutator/environments_compat.go b/bundle/config/mutator/environments_compat.go index 0eb996b14..cbedcaefd 100644 --- a/bundle/config/mutator/environments_compat.go +++ b/bundle/config/mutator/environments_compat.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" ) @@ -18,7 +19,7 @@ func (m *environmentsToTargets) Name() string { return "EnvironmentsToTargets" } -func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Short circuit if the "environments" key is not set. // This is the common case. if b.Config.Environments == nil { @@ -26,7 +27,7 @@ func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) err } // The "environments" key is set; validate and rewrite it to "targets". 
- return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { environments := v.Get("environments") targets := v.Get("targets") @@ -60,4 +61,6 @@ func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) err return v, nil }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/environments_compat_test.go b/bundle/config/mutator/environments_compat_test.go index f7045b3df..8a2129847 100644 --- a/bundle/config/mutator/environments_compat_test.go +++ b/bundle/config/mutator/environments_compat_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestEnvironmentsToTargetsWithBothDefined(t *testing.T) { @@ -26,8 +27,8 @@ func TestEnvironmentsToTargetsWithBothDefined(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) - assert.ErrorContains(t, err, `both 'environments' and 'targets' are specified;`) + diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + assert.ErrorContains(t, diags.Error(), `both 'environments' and 'targets' are specified;`) } func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) { @@ -41,8 +42,8 @@ func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + require.NoError(t, diags.Error()) assert.Len(t, b.Config.Environments, 0) assert.Len(t, b.Config.Targets, 1) } @@ -58,8 +59,8 @@ func TestEnvironmentsToTargetsWithTargetsDefined(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + require.NoError(t, diags.Error()) assert.Len(t, b.Config.Environments, 0) assert.Len(t, b.Config.Targets, 1) } diff --git a/bundle/config/mutator/expand_pipeline_glob_paths.go b/bundle/config/mutator/expand_pipeline_glob_paths.go index 843bc1271..268d8fa48 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" ) @@ -92,8 +93,8 @@ func (m *expandPipelineGlobPaths) expandSequence(p dyn.Path, v dyn.Value) (dyn.V return dyn.NewValue(vs, v.Location()), nil } -func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) error { - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { +func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { p := dyn.NewPattern( dyn.Key("resources"), dyn.Key("pipelines"), @@ -104,6 +105,8 @@ func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) err // Visit each pipeline's "libraries" field and expand any glob patterns. 
return dyn.MapByPattern(v, p, m.expandSequence) }) + + return diag.FromErr(err) } func (*expandPipelineGlobPaths) Name() string { diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index 828eac3de..db80be028 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -109,8 +109,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml")) m := ExpandPipelineGlobPaths() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) libraries := b.Config.Resources.Pipelines["pipeline"].Libraries require.Len(t, libraries, 13) diff --git a/bundle/config/mutator/expand_workspace_root.go b/bundle/config/mutator/expand_workspace_root.go index 59f19ccc4..8954abd46 100644 --- a/bundle/config/mutator/expand_workspace_root.go +++ b/bundle/config/mutator/expand_workspace_root.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type expandWorkspaceRoot struct{} @@ -20,15 +21,15 @@ func (m *expandWorkspaceRoot) Name() string { return "ExpandWorkspaceRoot" } -func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { root := b.Config.Workspace.RootPath if root == "" { - return fmt.Errorf("unable to expand workspace root: workspace root not defined") + return diag.Errorf("unable to expand workspace root: workspace root not defined") } currentUser := b.Config.Workspace.CurrentUser if currentUser == nil || currentUser.UserName == "" { - return fmt.Errorf("unable to expand workspace root: current user not set") + return diag.Errorf("unable to expand workspace root: current user not set") } if strings.HasPrefix(root, "~/") { diff --git a/bundle/config/mutator/expand_workspace_root_test.go b/bundle/config/mutator/expand_workspace_root_test.go index 17ee06509..e6260dbd8 100644 --- a/bundle/config/mutator/expand_workspace_root_test.go +++ b/bundle/config/mutator/expand_workspace_root_test.go @@ -25,8 +25,8 @@ func TestExpandWorkspaceRoot(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) + require.NoError(t, diags.Error()) assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath) } @@ -43,8 +43,8 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) + require.NoError(t, diags.Error()) assert.Equal(t, "/Users/charly@doe.com/foo", b.Config.Workspace.RootPath) } @@ -60,8 +60,8 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) - require.Error(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) + require.True(t, diags.HasError()) } func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) { @@ -72,6 +72,6 @@ func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) { }, }, } - err 
:= bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) - require.Error(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) + require.True(t, diags.HasError()) } diff --git a/bundle/config/mutator/if.go b/bundle/config/mutator/if.go index 462d8f004..1b7856b3c 100644 --- a/bundle/config/mutator/if.go +++ b/bundle/config/mutator/if.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type ifMutator struct { @@ -22,7 +23,7 @@ func If( } } -func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if m.condition(b) { return bundle.Apply(ctx, b, m.onTrueMutator) } else { diff --git a/bundle/config/mutator/initialize_variables.go b/bundle/config/mutator/initialize_variables.go index 8e50b4d04..e72cdde31 100644 --- a/bundle/config/mutator/initialize_variables.go +++ b/bundle/config/mutator/initialize_variables.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" ) type initializeVariables struct{} @@ -18,7 +19,7 @@ func (m *initializeVariables) Name() string { return "InitializeVariables" } -func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { vars := b.Config.Variables for k, v := range vars { if v == nil { diff --git a/bundle/config/mutator/initialize_variables_test.go b/bundle/config/mutator/initialize_variables_test.go index 46445591a..3ca4384fa 100644 --- a/bundle/config/mutator/initialize_variables_test.go +++ b/bundle/config/mutator/initialize_variables_test.go @@ -23,8 +23,8 @@ func TestInitializeVariables(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) + require.NoError(t, diags.Error()) assert.NotNil(t, b.Config.Variables["foo"]) assert.NotNil(t, b.Config.Variables["bar"]) assert.Equal(t, "This is a description", b.Config.Variables["bar"].Description) @@ -36,7 +36,7 @@ func TestInitializeVariablesWithoutVariables(t *testing.T) { Variables: nil, }, } - err := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) + require.NoError(t, diags.Error()) assert.Nil(t, b.Config.Variables) } diff --git a/bundle/config/mutator/initialize_workspace_client.go b/bundle/config/mutator/initialize_workspace_client.go index afc38d4d5..5c905f40c 100644 --- a/bundle/config/mutator/initialize_workspace_client.go +++ b/bundle/config/mutator/initialize_workspace_client.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type initializeWorkspaceClient struct{} @@ -19,7 +20,7 @@ func (m *initializeWorkspaceClient) Name() string { // Apply initializes the workspace client for the bundle. We do this here so // downstream calls to b.WorkspaceClient() do not panic if there's an error in the // auth configuration. 
-func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { _, err := b.InitializeWorkspaceClient() - return err + return diag.FromErr(err) } diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 3a50d683e..6ff9aad62 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -5,6 +5,7 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/log" ) @@ -19,11 +20,11 @@ func (m *loadGitDetails) Name() string { return "LoadGitDetails" } -func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Load relevant git repository repo, err := git.NewRepository(b.Config.Path) if err != nil { - return err + return diag.FromErr(err) } // Read branch name of current checkout @@ -57,12 +58,12 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error { // Compute relative path of the bundle root from the Git repo root. absBundlePath, err := filepath.Abs(b.Config.Path) if err != nil { - return err + return diag.FromErr(err) } // repo.Root() returns the absolute path of the repo relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath) if err != nil { - return err + return diag.FromErr(err) } b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath) return nil diff --git a/bundle/config/mutator/merge_job_clusters.go b/bundle/config/mutator/merge_job_clusters.go index 9c99cfaad..20f4efe85 100644 --- a/bundle/config/mutator/merge_job_clusters.go +++ b/bundle/config/mutator/merge_job_clusters.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/merge" ) @@ -29,8 +30,8 @@ func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string { } } -func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) error { - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { +func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { if v == dyn.NilValue { return v, nil } @@ -39,4 +40,6 @@ func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) error { return dyn.Map(job, "job_clusters", merge.ElementsByKey("job_cluster_key", m.jobClusterKey)) })) }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/merge_job_clusters_test.go b/bundle/config/mutator/merge_job_clusters_test.go index a32b70281..3ddb2b63a 100644 --- a/bundle/config/mutator/merge_job_clusters_test.go +++ b/bundle/config/mutator/merge_job_clusters_test.go @@ -50,8 +50,8 @@ func TestMergeJobClusters(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.MergeJobClusters()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.MergeJobClusters()) + assert.NoError(t, diags.Error()) j := b.Config.Resources.Jobs["foo"] @@ -99,7 +99,7 @@ func TestMergeJobClustersWithNilKey(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.MergeJobClusters()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.MergeJobClusters()) 
+ assert.NoError(t, diags.Error()) assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) } diff --git a/bundle/config/mutator/merge_job_tasks.go b/bundle/config/mutator/merge_job_tasks.go index 91aee3a03..68c05383c 100644 --- a/bundle/config/mutator/merge_job_tasks.go +++ b/bundle/config/mutator/merge_job_tasks.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/merge" ) @@ -29,8 +30,8 @@ func (m *mergeJobTasks) taskKeyString(v dyn.Value) string { } } -func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) error { - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { +func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { if v == dyn.NilValue { return v, nil } @@ -39,4 +40,6 @@ func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) error { return dyn.Map(job, "tasks", merge.ElementsByKey("task_key", m.taskKeyString)) })) }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/merge_job_tasks_test.go b/bundle/config/mutator/merge_job_tasks_test.go index b3fb357e0..a9dae1e10 100644 --- a/bundle/config/mutator/merge_job_tasks_test.go +++ b/bundle/config/mutator/merge_job_tasks_test.go @@ -58,8 +58,8 @@ func TestMergeJobTasks(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.MergeJobTasks()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.MergeJobTasks()) + assert.NoError(t, diags.Error()) j := b.Config.Resources.Jobs["foo"] @@ -111,7 +111,7 @@ func TestMergeJobTasksWithNilKey(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.MergeJobTasks()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.MergeJobTasks()) + assert.NoError(t, diags.Error()) assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 1) } diff --git a/bundle/config/mutator/merge_pipeline_clusters.go b/bundle/config/mutator/merge_pipeline_clusters.go index 552d997b9..0b1cf8983 100644 --- a/bundle/config/mutator/merge_pipeline_clusters.go +++ b/bundle/config/mutator/merge_pipeline_clusters.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/merge" ) @@ -32,8 +33,8 @@ func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string { } } -func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) error { - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { +func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { if v == dyn.NilValue { return v, nil } @@ -42,4 +43,6 @@ func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) err return dyn.Map(pipeline, "clusters", merge.ElementsByKey("label", m.clusterLabel)) })) }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/merge_pipeline_clusters_test.go b/bundle/config/mutator/merge_pipeline_clusters_test.go index fb54a67d2..f117d9399 100644 --- a/bundle/config/mutator/merge_pipeline_clusters_test.go +++ b/bundle/config/mutator/merge_pipeline_clusters_test.go @@ -42,8 +42,8 @@ func TestMergePipelineClusters(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, 
mutator.MergePipelineClusters()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, diags.Error()) p := b.Config.Resources.Pipelines["foo"] @@ -86,8 +86,8 @@ func TestMergePipelineClustersCaseInsensitive(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, diags.Error()) p := b.Config.Resources.Pipelines["foo"] assert.Len(t, p.Clusters, 1) @@ -107,8 +107,8 @@ func TestMergePipelineClustersNilPipelines(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, diags.Error()) } func TestMergePipelineClustersEmptyPipelines(t *testing.T) { @@ -120,6 +120,6 @@ func TestMergePipelineClustersEmptyPipelines(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, diags.Error()) } diff --git a/bundle/config/mutator/noop.go b/bundle/config/mutator/noop.go index 91c16385b..f27c940e3 100644 --- a/bundle/config/mutator/noop.go +++ b/bundle/config/mutator/noop.go @@ -4,11 +4,12 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type noop struct{} -func (*noop) Apply(context.Context, *bundle.Bundle) error { +func (*noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics { return nil } diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index 21d950135..6b5c89be1 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -2,11 +2,11 @@ package mutator import ( "context" - "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" ) @@ -32,10 +32,10 @@ func overrideJobCompute(j *resources.Job, compute string) { } } -func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Bundle.Mode != config.Development { if b.Config.Bundle.ComputeID != "" { - return fmt.Errorf("cannot override compute for an target that does not use 'mode: development'") + return diag.Errorf("cannot override compute for an target that does not use 'mode: development'") } return nil } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 7cc500c60..e5087167d 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -49,8 +49,8 @@ func TestOverrideDevelopment(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) @@ -85,8 
+85,8 @@ func TestOverrideDevelopmentEnv(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } @@ -110,8 +110,8 @@ func TestOverridePipelineTask(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } @@ -140,8 +140,8 @@ func TestOverrideProduction(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.Error(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.True(t, diags.HasError()) } func TestOverrideProductionEnv(t *testing.T) { @@ -167,6 +167,6 @@ func TestOverrideProductionEnv(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) } diff --git a/bundle/config/mutator/populate_current_user.go b/bundle/config/mutator/populate_current_user.go index a604cb902..b5e0bd437 100644 --- a/bundle/config/mutator/populate_current_user.go +++ b/bundle/config/mutator/populate_current_user.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" ) @@ -20,7 +21,7 @@ func (m *populateCurrentUser) Name() string { return "PopulateCurrentUser" } -func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Workspace.CurrentUser != nil { return nil } @@ -28,7 +29,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error w := b.WorkspaceClient() me, err := w.CurrentUser.Me(ctx) if err != nil { - return err + return diag.FromErr(err) } b.Config.Workspace.CurrentUser = &config.User{ diff --git a/bundle/config/mutator/process_include.go b/bundle/config/mutator/process_include.go index 350c3c49c..23acdf12a 100644 --- a/bundle/config/mutator/process_include.go +++ b/bundle/config/mutator/process_include.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" ) type processInclude struct { @@ -25,10 +26,12 @@ func (m *processInclude) Name() string { return fmt.Sprintf("ProcessInclude(%s)", m.relPath) } -func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { this, err := config.Load(m.fullPath) if err != nil { - return err + return diag.FromErr(err) } - return b.Config.Merge(this) + // TODO: Return actual warnings. 
+ err = b.Config.Merge(this) + return diag.FromErr(err) } diff --git a/bundle/config/mutator/process_include_test.go b/bundle/config/mutator/process_include_test.go index 7ca5d1981..0e5351b63 100644 --- a/bundle/config/mutator/process_include_test.go +++ b/bundle/config/mutator/process_include_test.go @@ -32,7 +32,7 @@ func TestProcessInclude(t *testing.T) { f.Close() assert.Equal(t, "foo", b.Config.Workspace.Host) - err = bundle.Apply(context.Background(), b, mutator.ProcessInclude(fullPath, relPath)) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ProcessInclude(fullPath, relPath)) + require.NoError(t, diags.Error()) assert.Equal(t, "bar", b.Config.Workspace.Host) } diff --git a/bundle/config/mutator/process_root_includes.go b/bundle/config/mutator/process_root_includes.go index 5a5ab1b19..dbf99f2dc 100644 --- a/bundle/config/mutator/process_root_includes.go +++ b/bundle/config/mutator/process_root_includes.go @@ -2,7 +2,6 @@ package mutator import ( "context" - "fmt" "os" "path/filepath" "slices" @@ -11,6 +10,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/libs/diag" ) // Get extra include paths from environment variable @@ -34,7 +34,7 @@ func (m *processRootIncludes) Name() string { return "ProcessRootIncludes" } -func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { var out []bundle.Mutator // Map with files we've already seen to avoid loading them twice. @@ -53,7 +53,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error if filepath.IsAbs(extraIncludePath) { rel, err := filepath.Rel(b.Config.Path, extraIncludePath) if err != nil { - return fmt.Errorf("unable to include file '%s': %w", extraIncludePath, err) + return diag.Errorf("unable to include file '%s': %v", extraIncludePath, err) } extraIncludePath = rel } @@ -66,19 +66,19 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error for _, entry := range b.Config.Include { // Include paths must be relative. if filepath.IsAbs(entry) { - return fmt.Errorf("%s: includes must be relative paths", entry) + return diag.Errorf("%s: includes must be relative paths", entry) } // Anchor includes to the bundle root path. matches, err := filepath.Glob(filepath.Join(b.Config.Path, entry)) if err != nil { - return err + return diag.FromErr(err) } // If the entry is not a glob pattern and no matches found, // return an error because the file defined is not found if len(matches) == 0 && !strings.ContainsAny(entry, "*?[") { - return fmt.Errorf("%s defined in 'include' section does not match any files", entry) + return diag.Errorf("%s defined in 'include' section does not match any files", entry) } // Filter matches to ones we haven't seen yet. 
@@ -86,7 +86,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error
 	for _, match := range matches {
 		rel, err := filepath.Rel(b.Config.Path, match)
 		if err != nil {
-			return err
+			return diag.FromErr(err)
 		}
 		if _, ok := seen[rel]; ok {
 			continue
diff --git a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/mutator/process_root_includes_test.go
index 645eb89a9..7b2194553 100644
--- a/bundle/config/mutator/process_root_includes_test.go
+++ b/bundle/config/mutator/process_root_includes_test.go
@@ -23,8 +23,8 @@ func TestProcessRootIncludesEmpty(t *testing.T) {
 			Path: ".",
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
 }
 
 func TestProcessRootIncludesAbs(t *testing.T) {
@@ -43,9 +43,9 @@ func TestProcessRootIncludesAbs(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.Error(t, err)
-	assert.Contains(t, err.Error(), "must be relative paths")
+	diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
+	require.True(t, diags.HasError())
+	assert.ErrorContains(t, diags.Error(), "must be relative paths")
 }
 
 func TestProcessRootIncludesSingleGlob(t *testing.T) {
@@ -62,9 +62,8 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
 	testutil.Touch(t, b.Config.Path, "a.yml")
 	testutil.Touch(t, b.Config.Path, "b.yml")
 
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
-
+	diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include)
 }
 
@@ -82,9 +81,8 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
 	testutil.Touch(t, b.Config.Path, "a1.yml")
 	testutil.Touch(t, b.Config.Path, "b1.yml")
 
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
-
+	diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include)
 }
 
@@ -101,8 +99,8 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
 
 	testutil.Touch(t, b.Config.Path, "a.yml")
 
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, []string{"a.yml"}, b.Config.Include)
 }
 
@@ -115,9 +113,9 @@ func TestProcessRootIncludesNotExists(t *testing.T) {
 			},
 		},
 	}
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.Error(t, err)
-	assert.Contains(t, err.Error(), "notexist.yml defined in 'include' section does not match any files")
+	diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
+	require.True(t, diags.HasError())
+	assert.ErrorContains(t, diags.Error(), "notexist.yml defined in 'include' section does not match any files")
 }
 
 func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) {
@@ -132,8 +130,8 @@ func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) {
 		},
 	}
 
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
 	assert.Contains(t, b.Config.Include, testYamlName)
 }
 
@@ -155,7 +153,7 @@ func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) {
 		},
 	}
 
-	err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes())
+	require.NoError(t, diags.Error())
 	assert.Equal(t, []string{testYamlName}, b.Config.Include)
 }
diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go
index e57509452..d3de5728c 100644
--- a/bundle/config/mutator/process_target_mode.go
+++ b/bundle/config/mutator/process_target_mode.go
@@ -2,13 +2,13 @@ package mutator
 
 import (
 	"context"
-	"fmt"
 	"path"
 	"strings"
 
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/libs/auth"
+	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
 	"github.com/databricks/databricks-sdk-go/service/jobs"
 	"github.com/databricks/databricks-sdk-go/service/ml"
@@ -29,7 +29,7 @@ func (m *processTargetMode) Name() string {
 // Mark all resources as being for 'development' purposes, i.e.
 // changing their their name, adding tags, and (in the future)
 // marking them as 'hidden' in the UI.
-func transformDevelopmentMode(b *bundle.Bundle) error {
+func transformDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
 	r := b.Config.Resources
 
 	shortName := b.Config.Workspace.CurrentUser.ShortName
@@ -100,9 +100,9 @@ func transformDevelopmentMode(b *bundle.Bundle) error {
 	return nil
 }
 
-func validateDevelopmentMode(b *bundle.Bundle) error {
+func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics {
 	if path := findNonUserPath(b); path != "" {
-		return fmt.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
+		return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
 	}
 	return nil
 }
@@ -125,7 +125,7 @@ func findNonUserPath(b *bundle.Bundle) string {
 	return ""
 }
 
-func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) error {
+func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) diag.Diagnostics {
 	if b.Config.Bundle.Git.Inferred {
 		env := b.Config.Bundle.Target
 		log.Warnf(ctx, "target with 'mode: production' should specify an explicit 'targets.%s.git' configuration", env)
@@ -134,12 +134,12 @@ func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUs
 	r := b.Config.Resources
 	for i := range r.Pipelines {
 		if r.Pipelines[i].Development {
-			return fmt.Errorf("target with 'mode: production' cannot include a pipeline with 'development: true'")
+			return diag.Errorf("target with 'mode: production' cannot include a pipeline with 'development: true'")
 		}
 	}
 
 	if !isPrincipalUsed && !isRunAsSet(r) {
-		return fmt.Errorf("'run_as' must be set for all jobs when using 'mode: production'")
+		return diag.Errorf("'run_as' must be set for all jobs when using 'mode: production'")
 	}
 	return nil
 }
@@ -156,12 +156,12 @@ func isRunAsSet(r config.Resources) bool {
 	return true
 }
 
-func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	switch b.Config.Bundle.Mode {
 	case config.Development:
-		err := validateDevelopmentMode(b)
-		if err != nil {
-			return err
+		diags := validateDevelopmentMode(b)
+		if diags != nil {
+			return diags
 		}
 		return transformDevelopmentMode(b)
 	case config.Production:
@@ -170,7 +170,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error {
 	case "":
 		// No action
 	default:
-		return fmt.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode)
+		return diag.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode)
 	}
 
 	return nil
diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go
index a5f61284c..17f838160 100644
--- a/bundle/config/mutator/process_target_mode_test.go
+++ b/bundle/config/mutator/process_target_mode_test.go
@@ -110,8 +110,8 @@ func TestProcessTargetModeDevelopment(t *testing.T) {
 	b := mockBundle(config.Development)
 
 	m := ProcessTargetMode()
-	err := bundle.Apply(context.Background(), b, m)
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, m)
+	require.NoError(t, diags.Error())
 
 	// Job 1
 	assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name)
@@ -154,8 +154,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) {
 	})
 
 	b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
-	err := bundle.Apply(context.Background(), b, ProcessTargetMode())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
+	require.NoError(t, diags.Error())
 
 	// Assert that tag normalization took place.
 	assert.Equal(t, "Hello world__", b.Config.Resources.Jobs["job1"].Tags["dev"])
@@ -168,8 +168,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) {
 	})
 
 	b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
-	err := bundle.Apply(context.Background(), b, ProcessTargetMode())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
+	require.NoError(t, diags.Error())
 
 	// Assert that tag normalization took place (Azure allows more characters than AWS).
 	assert.Equal(t, "Héllö wörld?!", b.Config.Resources.Jobs["job1"].Tags["dev"])
@@ -182,8 +182,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) {
 	})
 
 	b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!"
-	err := bundle.Apply(context.Background(), b, ProcessTargetMode())
-	require.NoError(t, err)
+	diags := bundle.Apply(context.Background(), b, ProcessTargetMode())
+	require.NoError(t, diags.Error())
 
 	// Assert that tag normalization took place.
assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"]) @@ -193,8 +193,8 @@ func TestProcessTargetModeDefault(t *testing.T) { b := mockBundle("") m := ProcessTargetMode() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name) assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) @@ -205,15 +205,15 @@ func TestProcessTargetModeDefault(t *testing.T) { func TestProcessTargetModeProduction(t *testing.T) { b := mockBundle(config.Production) - err := validateProductionMode(context.Background(), b, false) - require.ErrorContains(t, err, "run_as") + diags := validateProductionMode(context.Background(), b, false) + require.ErrorContains(t, diags.Error(), "run_as") b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state" b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts" b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files" - err = validateProductionMode(context.Background(), b, false) - require.ErrorContains(t, err, "production") + diags = validateProductionMode(context.Background(), b, false) + require.ErrorContains(t, diags.Error(), "production") permissions := []resources.Permission{ { @@ -232,8 +232,8 @@ func TestProcessTargetModeProduction(t *testing.T) { b.Config.Resources.Models["model1"].Permissions = permissions b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions - err = validateProductionMode(context.Background(), b, false) - require.NoError(t, err) + diags = validateProductionMode(context.Background(), b, false) + require.NoError(t, diags.Error()) assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name) assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) @@ -246,12 +246,12 @@ func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { b := mockBundle(config.Production) // Our target has all kinds of problems when not using service principals ... - err := validateProductionMode(context.Background(), b, false) - require.Error(t, err) + diags := validateProductionMode(context.Background(), b, false) + require.Error(t, diags.Error()) // ... 
but we're much less strict when a principal is used - err = validateProductionMode(context.Background(), b, true) - require.NoError(t, err) + diags = validateProductionMode(context.Background(), b, true) + require.NoError(t, diags.Error()) } // Make sure that we have test coverage for all resource types @@ -277,8 +277,8 @@ func TestAllResourcesRenamed(t *testing.T) { b := mockBundle(config.Development) m := ProcessTargetMode() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) resources := reflect.ValueOf(b.Config.Resources) for i := 0; i < resources.NumField(); i++ { diff --git a/bundle/config/mutator/resolve_resource_references.go b/bundle/config/mutator/resolve_resource_references.go index 7a7462ab9..89eaa346c 100644 --- a/bundle/config/mutator/resolve_resource_references.go +++ b/bundle/config/mutator/resolve_resource_references.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" "golang.org/x/sync/errgroup" ) @@ -15,7 +16,7 @@ func ResolveResourceReferences() bundle.Mutator { return &resolveResourceReferences{} } -func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { errs, errCtx := errgroup.WithContext(ctx) for k := range b.Config.Variables { @@ -40,7 +41,7 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) }) } - return errs.Wait() + return diag.FromErr(errs.Wait()) } func (*resolveResourceReferences) Name() string { diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index 5f5dab316..16934ff38 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -50,8 +50,8 @@ func TestResolveClusterReference(t *testing.T) { ClusterId: "9876-5432-xywz", }, nil) - err := bundle.Apply(context.Background(), b, ResolveResourceReferences()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.NoError(t, diags.Error()) require.Equal(t, "1234-5678-abcd", *b.Config.Variables["my-cluster-id-1"].Value) require.Equal(t, "9876-5432-xywz", *b.Config.Variables["my-cluster-id-2"].Value) } @@ -79,8 +79,8 @@ func TestResolveNonExistentClusterReference(t *testing.T) { clusterApi := m.GetMockClustersAPI() clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef)) - err := bundle.Apply(context.Background(), b, ResolveResourceReferences()) - require.ErrorContains(t, err, "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist") + diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist") } func TestNoLookupIfVariableIsSet(t *testing.T) { @@ -102,8 +102,8 @@ func TestNoLookupIfVariableIsSet(t *testing.T) { b.Config.Variables["my-cluster-id"].Set("random value") - err := bundle.Apply(context.Background(), b, ResolveResourceReferences()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.NoError(t, diags.Error()) 
require.Equal(t, "random value", *b.Config.Variables["my-cluster-id"].Value) } @@ -129,7 +129,7 @@ func TestResolveServicePrincipal(t *testing.T) { ApplicationId: "app-1234", }, nil) - err := bundle.Apply(context.Background(), b, ResolveResourceReferences()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.NoError(t, diags.Error()) require.Equal(t, "app-1234", *b.Config.Variables["my-sp"].Value) } diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index 1075e83e3..0738c9bcb 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/cli/libs/dyn/dynvar" @@ -26,7 +27,7 @@ func (m *resolveVariableReferences) Validate(ctx context.Context, b *bundle.Bund return nil } -func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { prefixes := make([]dyn.Path, len(m.prefixes)) for i, prefix := range m.prefixes { prefixes[i] = dyn.MustPathFromString(prefix) @@ -36,7 +37,7 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) // We rewrite it here to make the resolution logic simpler. varPath := dyn.NewPath(dyn.Key("var")) - return b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { // Synthesize a copy of the root that has all fields that are present in the type // but not set in the dynamic value set to their corresponding empty value. // This enables users to interpolate variable references to fields that haven't @@ -92,4 +93,6 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) } return root, nil }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go index 8190c360f..651ea3d2c 100644 --- a/bundle/config/mutator/resolve_variable_references_test.go +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -29,14 +30,14 @@ func TestResolveVariableReferences(t *testing.T) { } // Apply with an invalid prefix. This should not change the workspace root path. - err := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist")) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist")) + require.NoError(t, diags.Error()) require.Equal(t, "${bundle.name}/bar", b.Config.Workspace.RootPath) require.Equal(t, "${workspace.root_path}/baz", b.Config.Workspace.FilePath) // Apply with a valid prefix. This should change the workspace root path. 
- err = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace")) - require.NoError(t, err) + diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace")) + require.NoError(t, diags.Error()) require.Equal(t, "example/bar", b.Config.Workspace.RootPath) require.Equal(t, "example/bar/baz", b.Config.Workspace.FilePath) } @@ -63,8 +64,8 @@ func TestResolveVariableReferencesToBundleVariables(t *testing.T) { } // Apply with a valid prefix. This should change the workspace root path. - err := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "variables")) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "variables")) + require.NoError(t, diags.Error()) require.Equal(t, "example/bar", b.Config.Workspace.RootPath) } @@ -92,15 +93,15 @@ func TestResolveVariableReferencesToEmptyFields(t *testing.T) { } // Apply for the bundle prefix. - err := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle")) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle")) + require.NoError(t, diags.Error()) // The job settings should have been interpolated to an empty string. require.Equal(t, "", b.Config.Resources.Jobs["job1"].JobSettings.Tags["git_branch"]) } func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { - var err error + var diags diag.Diagnostics b := &bundle.Bundle{ Config: config.Root{ @@ -142,20 +143,21 @@ func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { ctx := context.Background() // Initialize the variables. - err = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { - return b.Config.InitializeVariables([]string{ + diags = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.InitializeVariables([]string{ "no_alert_for_canceled_runs=true", "no_alert_for_skipped_runs=true", "min_workers=1", "max_workers=2", "spot_bid_max_price=0.5", }) + return diag.FromErr(err) }) - require.NoError(t, err) + require.NoError(t, diags.Error()) // Assign the variables to the dynamic configuration. - err = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + diags = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { var p dyn.Path var err error @@ -180,12 +182,13 @@ func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { return v, nil }) + return diag.FromErr(err) }) - require.NoError(t, err) + require.NoError(t, diags.Error()) // Apply for the variable prefix. This should resolve the variables to their values. 
- err = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables")) - require.NoError(t, err) + diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables")) + require.NoError(t, diags.Error()) assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns) assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns) assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MinWorkers) diff --git a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go index 5e17b1b5f..0785c6430 100644 --- a/bundle/config/mutator/rewrite_sync_paths.go +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" ) @@ -41,8 +42,8 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { } } -func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) error { - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { +func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) { v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.Config.Path))) if err != nil { @@ -55,4 +56,6 @@ func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) error { return v, nil }) }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/rewrite_sync_paths_test.go b/bundle/config/mutator/rewrite_sync_paths_test.go index 576333e92..667f811ac 100644 --- a/bundle/config/mutator/rewrite_sync_paths_test.go +++ b/bundle/config/mutator/rewrite_sync_paths_test.go @@ -34,8 +34,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) { bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml") bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml") - err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, diags.Error()) assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0]) assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1]) @@ -65,8 +65,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) { bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml") bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml") - err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, diags.Error()) assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0]) assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1]) @@ -82,8 +82,8 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, diags.Error()) }) t.Run("empty include/exclude blocks", func(t *testing.T) { @@ -97,7 +97,7 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) - assert.NoError(t, err) + diags := 
bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, diags.Error()) }) } diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 7d1a49175..243f8ef7d 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -23,7 +24,7 @@ func (m *setRunAs) Name() string { return "SetRunAs" } -func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { runAs := b.Config.RunAs if runAs == nil { return nil diff --git a/bundle/config/mutator/select_default_target.go b/bundle/config/mutator/select_default_target.go index be5046f82..4ac0aae6f 100644 --- a/bundle/config/mutator/select_default_target.go +++ b/bundle/config/mutator/select_default_target.go @@ -2,10 +2,10 @@ package mutator import ( "context" - "fmt" "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "golang.org/x/exp/maps" ) @@ -20,9 +20,9 @@ func (m *selectDefaultTarget) Name() string { return "SelectDefaultTarget" } -func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if len(b.Config.Targets) == 0 { - return fmt.Errorf("no targets defined") + return diag.Errorf("no targets defined") } // One target means there's only one default. @@ -41,12 +41,12 @@ func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error // It is invalid to have multiple targets with the `default` flag set. if len(defaults) > 1 { - return fmt.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", ")) + return diag.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", ")) } // If no target has the `default` flag set, ask the user to specify one. if len(defaults) == 0 { - return fmt.Errorf("please specify target") + return diag.Errorf("please specify target") } // One default remaining. 
diff --git a/bundle/config/mutator/select_default_target_test.go b/bundle/config/mutator/select_default_target_test.go index 1c2e451fe..dfea4ff67 100644 --- a/bundle/config/mutator/select_default_target_test.go +++ b/bundle/config/mutator/select_default_target_test.go @@ -16,8 +16,8 @@ func TestSelectDefaultTargetNoTargets(t *testing.T) { Targets: map[string]*config.Target{}, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.ErrorContains(t, err, "no targets defined") + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.ErrorContains(t, diags.Error(), "no targets defined") } func TestSelectDefaultTargetSingleTargets(t *testing.T) { @@ -28,8 +28,8 @@ func TestSelectDefaultTargetSingleTargets(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.NoError(t, diags.Error()) assert.Equal(t, "foo", b.Config.Bundle.Target) } @@ -43,8 +43,8 @@ func TestSelectDefaultTargetNoDefaults(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.ErrorContains(t, err, "please specify target") + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.ErrorContains(t, diags.Error(), "please specify target") } func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) { @@ -56,8 +56,8 @@ func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.ErrorContains(t, err, "please specify target") + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.ErrorContains(t, diags.Error(), "please specify target") } func TestSelectDefaultTargetMultipleDefaults(t *testing.T) { @@ -70,8 +70,8 @@ func TestSelectDefaultTargetMultipleDefaults(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.ErrorContains(t, err, "multiple targets are marked as default") + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.ErrorContains(t, diags.Error(), "multiple targets are marked as default") } func TestSelectDefaultTargetSingleDefault(t *testing.T) { @@ -84,7 +84,7 @@ func TestSelectDefaultTargetSingleDefault(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.NoError(t, diags.Error()) assert.Equal(t, "bar", b.Config.Bundle.Target) } diff --git a/bundle/config/mutator/select_target.go b/bundle/config/mutator/select_target.go index 95558f030..178686b6e 100644 --- a/bundle/config/mutator/select_target.go +++ b/bundle/config/mutator/select_target.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "golang.org/x/exp/maps" ) @@ -24,21 +25,21 @@ func (m *selectTarget) Name() string { return fmt.Sprintf("SelectTarget(%s)", m.name) } -func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Targets == nil { - return fmt.Errorf("no targets defined") + return diag.Errorf("no targets defined") } // Get specified target _, ok := b.Config.Targets[m.name] if !ok { - return 
fmt.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", ")) + return diag.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", ")) } // Merge specified target into root configuration structure. err := b.Config.MergeTargetOverrides(m.name) if err != nil { - return err + return diag.FromErr(err) } // Store specified target in configuration for reference. diff --git a/bundle/config/mutator/select_target_test.go b/bundle/config/mutator/select_target_test.go index 20467270b..a7c5ac93c 100644 --- a/bundle/config/mutator/select_target_test.go +++ b/bundle/config/mutator/select_target_test.go @@ -26,8 +26,8 @@ func TestSelectTarget(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectTarget("default")) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.SelectTarget("default")) + require.NoError(t, diags.Error()) assert.Equal(t, "bar", b.Config.Workspace.Host) } @@ -39,6 +39,6 @@ func TestSelectTargetNotFound(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectTarget("doesnt-exist")) - require.Error(t, err, "no targets defined") + diags := bundle.Apply(context.Background(), b, mutator.SelectTarget("doesnt-exist")) + require.Error(t, diags.Error(), "no targets defined") } diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index 3b9ac8ae7..bb88379e0 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -2,10 +2,10 @@ package mutator import ( "context" - "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" ) @@ -21,7 +21,7 @@ func (m *setVariables) Name() string { return "SetVariables" } -func setVariable(ctx context.Context, v *variable.Variable, name string) error { +func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Diagnostics { // case: variable already has value initialized, so skip if v.HasValue() { return nil @@ -32,7 +32,7 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) error { if val, ok := env.Lookup(ctx, envVarName); ok { err := v.Set(val) if err != nil { - return fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %w`, val, name, envVarName, err) + return diag.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err) } return nil } @@ -41,7 +41,7 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) error { if v.HasDefault() { err := v.Set(*v.Default) if err != nil { - return fmt.Errorf(`failed to assign default value from config "%s" to variable %s with error: %w`, *v.Default, name, err) + return diag.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, *v.Default, name, err) } return nil } @@ -55,15 +55,16 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) error { // We should have had a value to set for the variable at this point. // TODO: use cmdio to request values for unassigned variables if current // terminal is a tty. Tracked in https://github.com/databricks/cli/issues/379 - return fmt.Errorf(`no value assigned to required variable %s. 
Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) + return diag.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) } -func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + var diags diag.Diagnostics for name, variable := range b.Config.Variables { - err := setVariable(ctx, variable, name) - if err != nil { - return err + diags = diags.Extend(setVariable(ctx, variable, name)) + if diags.HasError() { + return diags } } - return nil + return diags } diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index 15a98e5cf..ae4f79896 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -21,8 +21,8 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { // set value for variable as an environment variable t.Setenv("BUNDLE_VAR_foo", "process-env") - err := setVariable(context.Background(), &variable, "foo") - require.NoError(t, err) + diags := setVariable(context.Background(), &variable, "foo") + require.NoError(t, diags.Error()) assert.Equal(t, *variable.Value, "process-env") } @@ -33,8 +33,8 @@ func TestSetVariableUsingDefaultValue(t *testing.T) { Default: &defaultVal, } - err := setVariable(context.Background(), &variable, "foo") - require.NoError(t, err) + diags := setVariable(context.Background(), &variable, "foo") + require.NoError(t, diags.Error()) assert.Equal(t, *variable.Value, "default") } @@ -49,8 +49,8 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { // since a value is already assigned to the variable, it would not be overridden // by the default value - err := setVariable(context.Background(), &variable, "foo") - require.NoError(t, err) + diags := setVariable(context.Background(), &variable, "foo") + require.NoError(t, diags.Error()) assert.Equal(t, *variable.Value, "assigned-value") } @@ -68,8 +68,8 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { // since a value is already assigned to the variable, it would not be overridden // by the value from environment - err := setVariable(context.Background(), &variable, "foo") - require.NoError(t, err) + diags := setVariable(context.Background(), &variable, "foo") + require.NoError(t, diags.Error()) assert.Equal(t, *variable.Value, "assigned-value") } @@ -79,8 +79,8 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) { } // fails because we could not resolve a value for the variable - err := setVariable(context.Background(), &variable, "foo") - assert.ErrorContains(t, err, "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable") + diags := setVariable(context.Background(), &variable, "foo") + assert.ErrorContains(t, diags.Error(), "no value assigned to required variable foo. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable") } func TestSetVariablesMutator(t *testing.T) { @@ -108,8 +108,8 @@ func TestSetVariablesMutator(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "env-var-b") - err := bundle.Apply(context.Background(), b, SetVariables()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, SetVariables()) + require.NoError(t, diags.Error()) assert.Equal(t, "default-a", *b.Config.Variables["a"].Value) assert.Equal(t, "env-var-b", *b.Config.Variables["b"].Value) assert.Equal(t, "assigned-val-c", *b.Config.Variables["c"].Value) diff --git a/bundle/config/mutator/trampoline.go b/bundle/config/mutator/trampoline.go index 24600f52f..72c053b59 100644 --- a/bundle/config/mutator/trampoline.go +++ b/bundle/config/mutator/trampoline.go @@ -9,6 +9,7 @@ import ( "text/template" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -40,12 +41,12 @@ func (m *trampoline) Name() string { return fmt.Sprintf("trampoline(%s)", m.name) } -func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tasks := m.functions.GetTasks(b) for _, task := range tasks { err := m.generateNotebookWrapper(ctx, b, task) if err != nil { - return err + return diag.FromErr(err) } } return nil diff --git a/bundle/config/mutator/trampoline_test.go b/bundle/config/mutator/trampoline_test.go index a3e06b303..8a375aa9b 100644 --- a/bundle/config/mutator/trampoline_test.go +++ b/bundle/config/mutator/trampoline_test.go @@ -80,8 +80,8 @@ func TestGenerateTrampoline(t *testing.T) { funcs := functions{} trampoline := NewTrampoline("test_trampoline", &funcs, "Hello from {{.MyName}}") - err := bundle.Apply(ctx, b, trampoline) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, trampoline) + require.NoError(t, diags.Error()) dir, err := b.InternalDir(ctx) require.NoError(t, err) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index ac1da5bf2..af6896ee0 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/notebook" ) @@ -185,10 +186,10 @@ func (m *translatePaths) rewriteRelativeTo(b *bundle.Bundle, p dyn.Path, v dyn.V return dyn.InvalidValue, err } -func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { m.seen = make(map[string]string) - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { var err error for _, fn := range []func(*bundle.Bundle, dyn.Value) (dyn.Value, error){ m.applyJobTranslations, @@ -202,4 +203,6 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error { } return v, nil }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 7e2f12ab0..bd2ec809b 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -78,8 +78,8 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) { bundletest.SetLocation(b, ".", 
filepath.Join(dir, "resource.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) assert.Equal( t, @@ -201,8 +201,8 @@ func TestTranslatePaths(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) // Assert that the path in the tasks now refer to the artifact. assert.Equal( @@ -332,8 +332,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml")) bundletest.SetLocation(b, "resources.pipelines", filepath.Join(dir, "pipeline/resource.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) assert.Equal( t, @@ -392,8 +392,8 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, "is not contained in bundle root") + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), "is not contained in bundle root") } func TestJobNotebookDoesNotExistError(t *testing.T) { @@ -422,8 +422,8 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found") } func TestJobFileDoesNotExistError(t *testing.T) { @@ -452,8 +452,8 @@ func TestJobFileDoesNotExistError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.EqualError(t, err, "file ./doesnt_exist.py not found") + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found") } func TestPipelineNotebookDoesNotExistError(t *testing.T) { @@ -482,8 +482,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found") } func TestPipelineFileDoesNotExistError(t *testing.T) { @@ -512,8 +512,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.EqualError(t, err, "file ./doesnt_exist.py not found") + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found") } func 
TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { @@ -546,8 +546,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`) } func TestJobNotebookTaskWithFileSourceError(t *testing.T) { @@ -580,8 +580,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`) } func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { @@ -614,8 +614,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`) } func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { @@ -648,6 +648,6 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`) } diff --git a/bundle/config/mutator/validate_git_details.go b/bundle/config/mutator/validate_git_details.go index 116498bfc..69a4221fd 100644 --- a/bundle/config/mutator/validate_git_details.go +++ b/bundle/config/mutator/validate_git_details.go @@ -2,9 +2,9 @@ package mutator import ( "context" - "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type validateGitDetails struct{} @@ -17,13 +17,13 @@ func (m *validateGitDetails) Name() string { return "ValidateGitDetails" } -func (m *validateGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *validateGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Bundle.Git.Branch == "" || b.Config.Bundle.Git.ActualBranch == "" { return nil } if b.Config.Bundle.Git.Branch != b.Config.Bundle.Git.ActualBranch && !b.Config.Bundle.Force { - return fmt.Errorf("not on the right Git branch:\n expected according to configuration: %s\n actual: %s\nuse --force to 
override", b.Config.Bundle.Git.Branch, b.Config.Bundle.Git.ActualBranch) + return diag.Errorf("not on the right Git branch:\n expected according to configuration: %s\n actual: %s\nuse --force to override", b.Config.Bundle.Git.Branch, b.Config.Bundle.Git.ActualBranch) } return nil } diff --git a/bundle/config/mutator/validate_git_details_test.go b/bundle/config/mutator/validate_git_details_test.go index f207d9cf9..952e0b572 100644 --- a/bundle/config/mutator/validate_git_details_test.go +++ b/bundle/config/mutator/validate_git_details_test.go @@ -22,9 +22,8 @@ func TestValidateGitDetailsMatchingBranches(t *testing.T) { } m := ValidateGitDetails() - err := bundle.Apply(context.Background(), b, m) - - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) } func TestValidateGitDetailsNonMatchingBranches(t *testing.T) { @@ -40,10 +39,10 @@ func TestValidateGitDetailsNonMatchingBranches(t *testing.T) { } m := ValidateGitDetails() - err := bundle.Apply(context.Background(), b, m) + diags := bundle.Apply(context.Background(), b, m) expectedError := "not on the right Git branch:\n expected according to configuration: main\n actual: feature\nuse --force to override" - assert.EqualError(t, err, expectedError) + assert.EqualError(t, diags.Error(), expectedError) } func TestValidateGitDetailsNotUsingGit(t *testing.T) { @@ -59,7 +58,6 @@ func TestValidateGitDetailsNotUsingGit(t *testing.T) { } m := ValidateGitDetails() - err := bundle.Apply(context.Background(), b, m) - - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) } diff --git a/bundle/deferred.go b/bundle/deferred.go index 5f3351fcf..56c2bdca2 100644 --- a/bundle/deferred.go +++ b/bundle/deferred.go @@ -3,7 +3,7 @@ package bundle import ( "context" - "github.com/databricks/cli/libs/errs" + "github.com/databricks/cli/libs/diag" ) type DeferredMutator struct { @@ -22,12 +22,9 @@ func Defer(mutator Mutator, finally Mutator) Mutator { } } -func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) error { - mainErr := Apply(ctx, b, d.mutator) - errOnFinish := Apply(ctx, b, d.finally) - if mainErr != nil || errOnFinish != nil { - return errs.FromMany(mainErr, errOnFinish) - } - - return nil +func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { + var diags diag.Diagnostics + diags = diags.Extend(Apply(ctx, b, d.mutator)) + diags = diags.Extend(Apply(ctx, b, d.finally)) + return diags } diff --git a/bundle/deferred_test.go b/bundle/deferred_test.go index f75867d69..3abc4aa10 100644 --- a/bundle/deferred_test.go +++ b/bundle/deferred_test.go @@ -2,9 +2,9 @@ package bundle import ( "context" - "fmt" "testing" + "github.com/databricks/cli/libs/diag" "github.com/stretchr/testify/assert" ) @@ -17,9 +17,9 @@ func (t *mutatorWithError) Name() string { return "mutatorWithError" } -func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) error { +func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) diag.Diagnostics { t.applyCalled++ - return fmt.Errorf(t.errorMsg) + return diag.Errorf(t.errorMsg) } func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) { @@ -30,8 +30,8 @@ func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) { deferredMutator := Defer(Seq(m1, m2, m3), cleanup) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.NoError(t, err) + diags := Apply(context.Background(), b, deferredMutator) + assert.NoError(t, diags.Error()) assert.Equal(t, 1, 
m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) @@ -47,8 +47,8 @@ func TestDeferredMutatorWhenFirstFails(t *testing.T) { deferredMutator := Defer(Seq(mErr, m1, m2), cleanup) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.ErrorContains(t, err, "mutator error occurred") + diags := Apply(context.Background(), b, deferredMutator) + assert.ErrorContains(t, diags.Error(), "mutator error occurred") assert.Equal(t, 1, mErr.applyCalled) assert.Equal(t, 0, m1.applyCalled) @@ -64,8 +64,8 @@ func TestDeferredMutatorWhenMiddleOneFails(t *testing.T) { deferredMutator := Defer(Seq(m1, mErr, m2), cleanup) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.ErrorContains(t, err, "mutator error occurred") + diags := Apply(context.Background(), b, deferredMutator) + assert.ErrorContains(t, diags.Error(), "mutator error occurred") assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, mErr.applyCalled) @@ -81,8 +81,8 @@ func TestDeferredMutatorWhenLastOneFails(t *testing.T) { deferredMutator := Defer(Seq(m1, m2, mErr), cleanup) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.ErrorContains(t, err, "mutator error occurred") + diags := Apply(context.Background(), b, deferredMutator) + assert.ErrorContains(t, diags.Error(), "mutator error occurred") assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) @@ -98,8 +98,14 @@ func TestDeferredMutatorCombinesErrorMessages(t *testing.T) { deferredMutator := Defer(Seq(m1, m2, mErr), cleanupErr) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.ErrorContains(t, err, "mutator error occurred\ncleanup error occurred") + diags := Apply(context.Background(), b, deferredMutator) + + var errs []string + for _, d := range diags { + errs = append(errs, d.Summary) + } + assert.Contains(t, errs, "mutator error occurred") + assert.Contains(t, errs, "cleanup error occurred") assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) diff --git a/bundle/deploy/check_running_resources.go b/bundle/deploy/check_running_resources.go index deb7775c6..7f7a9bcac 100644 --- a/bundle/deploy/check_running_resources.go +++ b/bundle/deploy/check_running_resources.go @@ -6,6 +6,7 @@ import ( "strconv" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -30,29 +31,29 @@ func (l *checkRunningResources) Name() string { return "check-running-resources" } -func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) error { +func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if !b.Config.Bundle.Deployment.FailOnActiveRuns { return nil } tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } state, err := b.Terraform.Show(ctx) if err != nil { - return err + return diag.FromErr(err) } err = checkAnyResourceRunning(ctx, b.WorkspaceClient(), state) if err != nil { - return fmt.Errorf("deployment aborted, err: %w", err) + return diag.Errorf("deployment aborted, err: %v", err) } return nil diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go index 
8585ec3c8..9367e2a62 100644 --- a/bundle/deploy/files/delete.go +++ b/bundle/deploy/files/delete.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/fatih/color" ) @@ -16,7 +17,7 @@ func (m *delete) Name() string { return "files.Delete" } -func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Do not delete files if terraform destroy was not consented if !b.Plan.IsEmpty && !b.Plan.ConfirmApply { return nil @@ -29,7 +30,7 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { if !b.AutoApprove { proceed, err := cmdio.AskYesOrNo(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?", b.Config.Workspace.RootPath, red("deleted permanently!"))) if err != nil { - return err + return diag.FromErr(err) } if !proceed { return nil @@ -41,17 +42,17 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { Recursive: true, }) if err != nil { - return err + return diag.FromErr(err) } // Clean up sync snapshot file sync, err := GetSync(ctx, b) if err != nil { - return err + return diag.FromErr(err) } err = sync.DestroySnapshot(ctx) if err != nil { - return err + return diag.FromErr(err) } cmdio.LogString(ctx, fmt.Sprintf("Deleted snapshot file at %s", sync.SnapshotPath())) diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index 4da41e202..58cb3c0f0 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -15,16 +16,16 @@ func (m *upload) Name() string { return "files.Upload" } -func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath)) sync, err := GetSync(ctx, b) if err != nil { - return err + return diag.FromErr(err) } err = sync.RunOnce(ctx) if err != nil { - return err + return diag.FromErr(err) } log.Infof(ctx, "Uploaded bundle files") diff --git a/bundle/deploy/lock/acquire.go b/bundle/deploy/lock/acquire.go index 69e6663fc..7d3d0eca8 100644 --- a/bundle/deploy/lock/acquire.go +++ b/bundle/deploy/lock/acquire.go @@ -3,9 +3,9 @@ package lock import ( "context" "errors" - "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" @@ -33,7 +33,7 @@ func (m *acquire) init(b *bundle.Bundle) error { return nil } -func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Return early if locking is disabled. 
if !b.Config.Bundle.Deployment.Lock.IsEnabled() { log.Infof(ctx, "Skipping; locking is disabled") @@ -42,7 +42,7 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error { err := m.init(b) if err != nil { - return err + return diag.FromErr(err) } force := b.Config.Bundle.Deployment.Lock.Force @@ -55,9 +55,9 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error { if errors.As(err, ¬ExistsError) { // If we get a "doesn't exist" error from the API this indicates // we either don't have permissions or the path is invalid. - return fmt.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath) + return diag.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath) } - return err + return diag.FromErr(err) } return nil diff --git a/bundle/deploy/lock/release.go b/bundle/deploy/lock/release.go index 4ea47c2f9..26f95edfc 100644 --- a/bundle/deploy/lock/release.go +++ b/bundle/deploy/lock/release.go @@ -2,9 +2,9 @@ package lock import ( "context" - "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" ) @@ -30,7 +30,7 @@ func (m *release) Name() string { return "lock:release" } -func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *release) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Return early if locking is disabled. if !b.Config.Bundle.Deployment.Lock.IsEnabled() { log.Infof(ctx, "Skipping; locking is disabled") @@ -47,12 +47,12 @@ func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "Releasing deployment lock") switch m.goal { case GoalDeploy: - return b.Locker.Unlock(ctx) + return diag.FromErr(b.Locker.Unlock(ctx)) case GoalBind, GoalUnbind: - return b.Locker.Unlock(ctx) + return diag.FromErr(b.Locker.Unlock(ctx)) case GoalDestroy: - return b.Locker.Unlock(ctx, locker.AllowLockFileNotExist) + return diag.FromErr(b.Locker.Unlock(ctx, locker.AllowLockFileNotExist)) default: - return fmt.Errorf("unknown goal for lock release: %s", m.goal) + return diag.Errorf("unknown goal for lock release: %s", m.goal) } } diff --git a/bundle/deploy/metadata/annotate_jobs.go b/bundle/deploy/metadata/annotate_jobs.go index 5b9ae5b88..372cbca13 100644 --- a/bundle/deploy/metadata/annotate_jobs.go +++ b/bundle/deploy/metadata/annotate_jobs.go @@ -5,6 +5,7 @@ import ( "path" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -18,7 +19,7 @@ func (m *annotateJobs) Name() string { return "metadata.AnnotateJobs" } -func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { for _, job := range b.Config.Resources.Jobs { if job.JobSettings == nil { continue diff --git a/bundle/deploy/metadata/annotate_jobs_test.go b/bundle/deploy/metadata/annotate_jobs_test.go index c7a02e754..8f2ab9c03 100644 --- a/bundle/deploy/metadata/annotate_jobs_test.go +++ b/bundle/deploy/metadata/annotate_jobs_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAnnotateJobsMutator(t *testing.T) { @@ 
-34,8 +35,8 @@ func TestAnnotateJobsMutator(t *testing.T) { }, } - err := AnnotateJobs().Apply(context.Background(), b) - assert.NoError(t, err) + diags := AnnotateJobs().Apply(context.Background(), b) + require.NoError(t, diags.Error()) assert.Equal(t, &jobs.JobDeployment{ @@ -67,6 +68,6 @@ func TestAnnotateJobsMutatorJobWithoutSettings(t *testing.T) { }, } - err := AnnotateJobs().Apply(context.Background(), b) - assert.NoError(t, err) + diags := AnnotateJobs().Apply(context.Background(), b) + require.NoError(t, diags.Error()) } diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go index c612d33a3..5a46cd67f 100644 --- a/bundle/deploy/metadata/compute.go +++ b/bundle/deploy/metadata/compute.go @@ -2,12 +2,12 @@ package metadata import ( "context" - "fmt" "path/filepath" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" + "github.com/databricks/cli/libs/diag" ) type compute struct{} @@ -20,7 +20,7 @@ func (m *compute) Name() string { return "metadata.Compute" } -func (m *compute) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { b.Metadata = metadata.Metadata{ Version: metadata.Version, Config: metadata.Config{}, @@ -41,7 +41,7 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) error { // root relativePath, err := filepath.Rel(b.Config.Path, job.ConfigFilePath) if err != nil { - return fmt.Errorf("failed to compute relative path for job %s: %w", name, err) + return diag.Errorf("failed to compute relative path for job %s: %v", name, err) } // Metadata for the job jobsMetadata[name] = &metadata.Job{ diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go index e717ebd53..6d43f845b 100644 --- a/bundle/deploy/metadata/compute_test.go +++ b/bundle/deploy/metadata/compute_test.go @@ -91,8 +91,8 @@ func TestComputeMetadataMutator(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, Compute()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, Compute()) + require.NoError(t, diags.Error()) assert.Equal(t, expectedMetadata, b.Metadata) } diff --git a/bundle/deploy/metadata/upload.go b/bundle/deploy/metadata/upload.go index f550a66e7..a040a0ae8 100644 --- a/bundle/deploy/metadata/upload.go +++ b/bundle/deploy/metadata/upload.go @@ -6,6 +6,7 @@ import ( "encoding/json" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" ) @@ -21,16 +22,16 @@ func (m *upload) Name() string { return "metadata.Upload" } -func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) if err != nil { - return err + return diag.FromErr(err) } metadata, err := json.MarshalIndent(b.Metadata, "", " ") if err != nil { - return err + return diag.FromErr(err) } - return f.Write(ctx, MetadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists) + return diag.FromErr(f.Write(ctx, MetadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists)) } diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 089a870cb..61f5426a0 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -11,6 +11,7 @@ import ( 
"github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy/files" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/sync" @@ -20,10 +21,10 @@ type statePull struct { filerFactory FilerFactory } -func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { +func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { f, err := s.filerFactory(b) if err != nil { - return err + return diag.FromErr(err) } // Download deployment state file from filer to local cache directory. @@ -31,7 +32,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { remote, err := s.remoteState(ctx, f) if err != nil { log.Infof(ctx, "Unable to open remote deployment state file: %s", err) - return err + return diag.FromErr(err) } if remote == nil { log.Infof(ctx, "Remote deployment state file does not exist") @@ -40,19 +41,19 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { statePath, err := getPathToStateFile(ctx, b) if err != nil { - return err + return diag.FromErr(err) } local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0600) if err != nil { - return err + return diag.FromErr(err) } defer local.Close() data := remote.Bytes() err = validateRemoteStateCompatibility(bytes.NewReader(data)) if err != nil { - return err + return diag.FromErr(err) } if !isLocalStateStale(local, bytes.NewReader(data)) { @@ -68,30 +69,30 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "Writing remote deployment state file to local cache directory") _, err = io.Copy(local, bytes.NewReader(data)) if err != nil { - return err + return diag.FromErr(err) } var state DeploymentState err = json.Unmarshal(data, &state) if err != nil { - return err + return diag.FromErr(err) } // Create a new snapshot based on the deployment state file. opts, err := files.GetSyncOptions(ctx, b) if err != nil { - return err + return diag.FromErr(err) } log.Infof(ctx, "Creating new snapshot") snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.Config.Path), opts) if err != nil { - return err + return diag.FromErr(err) } // Persist the snapshot to disk. 
log.Infof(ctx, "Persisting snapshot to disk") - return snapshot.Save(ctx) + return diag.FromErr(snapshot.Save(ctx)) } func (s *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buffer, error) { diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index 50eb90916..9716a1e04 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -106,8 +106,8 @@ func testStatePull(t *testing.T, opts statePullOpts) { require.NoError(t, err) } - err := bundle.Apply(ctx, b, s) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) // Check that deployment state was written statePath, err := getPathToStateFile(ctx, b) @@ -263,8 +263,8 @@ func TestStatePullNoState(t *testing.T) { } ctx := context.Background() - err := bundle.Apply(ctx, b, s) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) // Check that deployment state was not written statePath, err := getPathToStateFile(ctx, b) @@ -451,7 +451,7 @@ func TestStatePullNewerDeploymentStateVersion(t *testing.T) { } ctx := context.Background() - err := bundle.Apply(ctx, b, s) - require.Error(t, err) - require.Contains(t, err.Error(), "remote deployment state is incompatible with the current version of the CLI, please upgrade to at least 1.2.3") + diags := bundle.Apply(ctx, b, s) + require.True(t, diags.HasError()) + require.ErrorContains(t, diags.Error(), "remote deployment state is incompatible with the current version of the CLI, please upgrade to at least 1.2.3") } diff --git a/bundle/deploy/state_push.go b/bundle/deploy/state_push.go index 8818d0a73..176a907c8 100644 --- a/bundle/deploy/state_push.go +++ b/bundle/deploy/state_push.go @@ -5,6 +5,7 @@ import ( "os" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) @@ -17,27 +18,27 @@ func (s *statePush) Name() string { return "deploy:state-push" } -func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { +func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { f, err := s.filerFactory(b) if err != nil { - return err + return diag.FromErr(err) } statePath, err := getPathToStateFile(ctx, b) if err != nil { - return err + return diag.FromErr(err) } local, err := os.Open(statePath) if err != nil { - return err + return diag.FromErr(err) } defer local.Close() log.Infof(ctx, "Writing local deployment state file to remote state directory") err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/deploy/state_push_test.go b/bundle/deploy/state_push_test.go index 37b865ecb..c6d9f88f5 100644 --- a/bundle/deploy/state_push_test.go +++ b/bundle/deploy/state_push_test.go @@ -77,6 +77,6 @@ func TestStatePush(t *testing.T) { err = os.WriteFile(statePath, data, 0644) require.NoError(t, err) - err = bundle.Apply(ctx, b, s) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) } diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index 0ae61a6e2..cf2e9ac9e 100644 --- a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/libs/diag" 
"github.com/databricks/cli/libs/log" ) @@ -21,10 +22,10 @@ func (s *stateUpdate) Name() string { return "deploy:state-update" } -func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) error { +func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { state, err := load(ctx, b) if err != nil { - return err + return diag.FromErr(err) } // Increment the state sequence. @@ -40,41 +41,41 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) error { // Get the current file list. sync, err := files.GetSync(ctx, b) if err != nil { - return err + return diag.FromErr(err) } files, err := sync.GetFileList(ctx) if err != nil { - return err + return diag.FromErr(err) } // Update the state with the current file list. fl, err := FromSlice(files) if err != nil { - return err + return diag.FromErr(err) } state.Files = fl statePath, err := getPathToStateFile(ctx, b) if err != nil { - return err + return diag.FromErr(err) } // Write the state back to the file. f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600) if err != nil { log.Infof(ctx, "Unable to open deployment state file: %s", err) - return err + return diag.FromErr(err) } defer f.Close() data, err := json.Marshal(state) if err != nil { - return err + return diag.FromErr(err) } _, err = io.Copy(f, bytes.NewReader(data)) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index 5e16dd008..73b7fe4b3 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -55,8 +55,8 @@ func TestStateUpdate(t *testing.T) { ctx := context.Background() - err := bundle.Apply(ctx, b, s) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) // Check that the state file was updated. state, err := load(ctx, b) @@ -66,8 +66,8 @@ func TestStateUpdate(t *testing.T) { require.Len(t, state.Files, 3) require.Equal(t, build.GetInfo().Version, state.CliVersion) - err = bundle.Apply(ctx, b, s) - require.NoError(t, err) + diags = bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) // Check that the state file was updated again. state, err = load(ctx, b) @@ -136,8 +136,8 @@ func TestStateUpdateWithExistingState(t *testing.T) { err = os.WriteFile(statePath, data, 0644) require.NoError(t, err) - err = bundle.Apply(ctx, b, s) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) // Check that the state file was updated. 
state, err = load(ctx, b) diff --git a/bundle/deploy/terraform/apply.go b/bundle/deploy/terraform/apply.go index 117cdfc18..e4acda852 100644 --- a/bundle/deploy/terraform/apply.go +++ b/bundle/deploy/terraform/apply.go @@ -2,10 +2,10 @@ package terraform import ( "context" - "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" "github.com/hashicorp/terraform-exec/tfexec" ) @@ -16,22 +16,22 @@ func (w *apply) Name() string { return "terraform.Apply" } -func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error { +func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } cmdio.LogString(ctx, "Deploying resources...") err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } err = tf.Apply(ctx) if err != nil { - return fmt.Errorf("terraform apply: %w", err) + return diag.Errorf("terraform apply: %v", err) } log.Infof(ctx, "Resource deployment completed") diff --git a/bundle/deploy/terraform/destroy.go b/bundle/deploy/terraform/destroy.go index 0b3baba3b..16f074a22 100644 --- a/bundle/deploy/terraform/destroy.go +++ b/bundle/deploy/terraform/destroy.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/fatih/color" "github.com/hashicorp/terraform-exec/tfexec" tfjson "github.com/hashicorp/terraform-json" @@ -62,7 +63,7 @@ func (w *destroy) Name() string { return "terraform.Destroy" } -func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { +func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // return early if plan is empty if b.Plan.IsEmpty { cmdio.LogString(ctx, "No resources to destroy in plan. Skipping destroy!") @@ -71,19 +72,19 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } // read plan file plan, err := tf.ShowPlanFile(ctx, b.Plan.Path) if err != nil { - return err + return diag.FromErr(err) } // print the resources that will be destroyed err = logDestroyPlan(ctx, plan.ResourceChanges) if err != nil { - return err + return diag.FromErr(err) } // Ask for confirmation, if needed @@ -91,7 +92,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { red := color.New(color.FgRed).SprintFunc() b.Plan.ConfirmApply, err = cmdio.AskYesOrNo(ctx, fmt.Sprintf("\nThis will permanently %s resources! 
Proceed?", red("destroy"))) if err != nil { - return err + return diag.FromErr(err) } } @@ -101,7 +102,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { } if b.Plan.Path == "" { - return fmt.Errorf("no plan found") + return diag.Errorf("no plan found") } cmdio.LogString(ctx, "Starting to destroy resources") @@ -109,7 +110,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { // Apply terraform according to the computed destroy plan err = tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path)) if err != nil { - return fmt.Errorf("terraform destroy: %w", err) + return diag.Errorf("terraform destroy: %v", err) } cmdio.LogString(ctx, "Successfully destroyed resources!") diff --git a/bundle/deploy/terraform/import.go b/bundle/deploy/terraform/import.go index 5fc436f20..7c1a68158 100644 --- a/bundle/deploy/terraform/import.go +++ b/bundle/deploy/terraform/import.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/hashicorp/terraform-exec/tfexec" ) @@ -25,31 +26,31 @@ type importResource struct { } // Apply implements bundle.Mutator. -func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { dir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } err = tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } tmpDir, err := os.MkdirTemp("", "state-*") if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } tmpState := filepath.Join(tmpDir, TerraformStateFileName) importAddress := fmt.Sprintf("%s.%s", m.opts.ResourceType, m.opts.ResourceKey) err = tf.Import(ctx, importAddress, m.opts.ResourceId, tfexec.StateOut(tmpState)) if err != nil { - return fmt.Errorf("terraform import: %w", err) + return diag.Errorf("terraform import: %v", err) } buf := bytes.NewBuffer(nil) @@ -58,7 +59,7 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) error { //lint:ignore SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file changed, err := tf.Plan(ctx, tfexec.State(tmpState), tfexec.Target(importAddress)) if err != nil { - return fmt.Errorf("terraform plan: %w", err) + return diag.Errorf("terraform plan: %v", err) } defer os.RemoveAll(tmpDir) @@ -70,29 +71,29 @@ func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) error { cmdio.LogString(ctx, output) ans, err := cmdio.AskYesOrNo(ctx, "Confirm import changes? 
Changes will be remotely applied only after running 'bundle deploy'.") if err != nil { - return err + return diag.FromErr(err) } if !ans { - return fmt.Errorf("import aborted") + return diag.Errorf("import aborted") } } // If user confirmed changes, move the state file from temp dir to state location f, err := os.Create(filepath.Join(dir, TerraformStateFileName)) if err != nil { - return err + return diag.FromErr(err) } defer f.Close() tmpF, err := os.Open(tmpState) if err != nil { - return err + return diag.FromErr(err) } defer tmpF.Close() _, err = io.Copy(f, tmpF) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 503a1db24..ca1fc8caf 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/log" "github.com/hashicorp/go-version" @@ -151,7 +152,7 @@ func setProxyEnvVars(ctx context.Context, environ map[string]string, b *bundle.B return nil } -func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tfConfig := b.Config.Bundle.Terraform if tfConfig == nil { tfConfig = &config.Terraform{} @@ -160,46 +161,46 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) error { execPath, err := m.findExecPath(ctx, b, tfConfig) if err != nil { - return err + return diag.FromErr(err) } workingDir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } tf, err := tfexec.NewTerraform(workingDir, execPath) if err != nil { - return err + return diag.FromErr(err) } environ, err := b.AuthEnv() if err != nil { - return err + return diag.FromErr(err) } err = inheritEnvVars(ctx, environ) if err != nil { - return err + return diag.FromErr(err) } // Set the temporary directory environment variables err = setTempDirEnvVars(ctx, environ, b) if err != nil { - return err + return diag.FromErr(err) } // Set the proxy related environment variables err = setProxyEnvVars(ctx, environ, b) if err != nil { - return err + return diag.FromErr(err) } // Configure environment variables for auth for Terraform to use. 
log.Debugf(ctx, "Environment variables for Terraform: %s", strings.Join(maps.Keys(environ), ", ")) err = tf.SetEnv(environ) if err != nil { - return err + return diag.FromErr(err) } b.Terraform = tf diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 4b00e18e4..bbef7f0f7 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -45,8 +45,8 @@ func TestInitEnvironmentVariables(t *testing.T) { t.Setenv("DATABRICKS_TOKEN", "foobar") b.WorkspaceClient() - err = bundle.Apply(context.Background(), b, Initialize()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, Initialize()) + require.NoError(t, diags.Error()) } func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) { diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index 525a38fa8..358279a7a 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/dynvar" ) @@ -20,8 +21,8 @@ func (m *interpolateMutator) Name() string { return "terraform.Interpolate" } -func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) error { - return b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { +func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { prefix := dyn.MustPathFromString("resources") // Resolve variable references in all values. @@ -61,4 +62,6 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) error return dyn.V(fmt.Sprintf("${%s}", path.String())), nil }) }) + + return diag.FromErr(err) } diff --git a/bundle/deploy/terraform/interpolate_test.go b/bundle/deploy/terraform/interpolate_test.go index be905ad77..9af4a1443 100644 --- a/bundle/deploy/terraform/interpolate_test.go +++ b/bundle/deploy/terraform/interpolate_test.go @@ -55,8 +55,8 @@ func TestInterpolate(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, Interpolate()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, Interpolate()) + require.NoError(t, diags.Error()) j := b.Config.Resources.Jobs["my_job"] assert.Equal(t, "${databricks_pipeline.other_pipeline.id}", j.Tags["other_pipeline"]) @@ -87,6 +87,6 @@ func TestInterpolateUnknownResourceType(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, Interpolate()) - assert.Contains(t, err.Error(), `reference does not exist: ${resources.unknown.other_unknown.id}`) + diags := bundle.Apply(context.Background(), b, Interpolate()) + assert.ErrorContains(t, diags.Error(), `reference does not exist: ${resources.unknown.other_unknown.id}`) } diff --git a/bundle/deploy/terraform/load.go b/bundle/deploy/terraform/load.go index 624bf7a50..fa0cd5b4f 100644 --- a/bundle/deploy/terraform/load.go +++ b/bundle/deploy/terraform/load.go @@ -6,6 +6,7 @@ import ( "slices" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/hashicorp/terraform-exec/tfexec" tfjson "github.com/hashicorp/terraform-json" ) @@ -22,31 +23,31 @@ func (l *load) Name() string { return "terraform.Load" } -func (l *load) Apply(ctx context.Context, b *bundle.Bundle) error { +func (l *load) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tf := b.Terraform if tf == nil { 
- return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } state, err := b.Terraform.Show(ctx) if err != nil { - return err + return diag.FromErr(err) } err = l.validateState(state) if err != nil { - return err + return diag.FromErr(err) } // Merge state into configuration. err = TerraformToBundle(state, &b.Config) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/deploy/terraform/load_test.go b/bundle/deploy/terraform/load_test.go index aeaffa14e..a912c5213 100644 --- a/bundle/deploy/terraform/load_test.go +++ b/bundle/deploy/terraform/load_test.go @@ -32,10 +32,10 @@ func TestLoadWithNoState(t *testing.T) { t.Setenv("DATABRICKS_TOKEN", "foobar") b.WorkspaceClient() - err = bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( Initialize(), Load(ErrorOnEmptyState), )) - require.ErrorContains(t, err, "Did you forget to run 'databricks bundle deploy'") + require.ErrorContains(t, diags.Error(), "Did you forget to run 'databricks bundle deploy'") } diff --git a/bundle/deploy/terraform/plan.go b/bundle/deploy/terraform/plan.go index ff841148c..50e0f78ca 100644 --- a/bundle/deploy/terraform/plan.go +++ b/bundle/deploy/terraform/plan.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/terraform" "github.com/hashicorp/terraform-exec/tfexec" ) @@ -26,30 +27,30 @@ func (p *plan) Name() string { return "terraform.Plan" } -func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) error { +func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } cmdio.LogString(ctx, "Starting plan computation") err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } // Persist computed plan tfDir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } planPath := filepath.Join(tfDir, "plan") destroy := p.goal == PlanDestroy notEmpty, err := tf.Plan(ctx, tfexec.Destroy(destroy), tfexec.Out(planPath)) if err != nil { - return err + return diag.FromErr(err) } // Set plan in main bundle struct for downstream mutators diff --git a/bundle/deploy/terraform/state_pull.go b/bundle/deploy/terraform/state_pull.go index 045222ae0..cc7d34274 100644 --- a/bundle/deploy/terraform/state_pull.go +++ b/bundle/deploy/terraform/state_pull.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) @@ -45,15 +46,15 @@ func (l *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buff return &buf, nil } -func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { +func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { f, err := l.filerFactory(b) if err != nil { - return err + return diag.FromErr(err) } dir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } // Download state file from filer to local cache directory. 
@@ -61,7 +62,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { remote, err := l.remoteState(ctx, f) if err != nil { log.Infof(ctx, "Unable to open remote state file: %s", err) - return err + return diag.FromErr(err) } if remote == nil { log.Infof(ctx, "Remote state file does not exist") @@ -71,7 +72,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { // Expect the state file to live under dir. local, err := os.OpenFile(filepath.Join(dir, TerraformStateFileName), os.O_CREATE|os.O_RDWR, 0600) if err != nil { - return err + return diag.FromErr(err) } defer local.Close() @@ -88,7 +89,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "Writing remote state file to local cache directory") _, err = io.Copy(local, bytes.NewReader(remote.Bytes())) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/deploy/terraform/state_pull_test.go b/bundle/deploy/terraform/state_pull_test.go index b7734a10f..805b5af0f 100644 --- a/bundle/deploy/terraform/state_pull_test.go +++ b/bundle/deploy/terraform/state_pull_test.go @@ -15,12 +15,11 @@ import ( "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" ) func mockStateFilerForPull(t *testing.T, contents map[string]int, merr error) filer.Filer { buf, err := json.Marshal(contents) - require.NoError(t, err) + assert.NoError(t, err) f := mockfiler.NewMockFiler(t) f. @@ -49,11 +48,11 @@ func TestStatePullLocalMissingRemoteMissing(t *testing.T) { ctx := context.Background() b := statePullTestBundle(t) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that no local state file has been written. - _, err = os.Stat(localStateFile(t, ctx, b)) + _, err := os.Stat(localStateFile(t, ctx, b)) assert.ErrorIs(t, err, fs.ErrNotExist) } @@ -64,8 +63,8 @@ func TestStatePullLocalMissingRemotePresent(t *testing.T) { ctx := context.Background() b := statePullTestBundle(t) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that the local state file has been updated. localState := readLocalState(t, ctx, b) @@ -82,8 +81,8 @@ func TestStatePullLocalStale(t *testing.T) { // Write a stale local state file. writeLocalState(t, ctx, b, map[string]int{"serial": 4}) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that the local state file has been updated. localState := readLocalState(t, ctx, b) @@ -100,8 +99,8 @@ func TestStatePullLocalEqual(t *testing.T) { // Write a local state file with the same serial as the remote. writeLocalState(t, ctx, b, map[string]int{"serial": 5}) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that the local state file has not been updated. localState := readLocalState(t, ctx, b) @@ -118,8 +117,8 @@ func TestStatePullLocalNewer(t *testing.T) { // Write a local state file with a newer serial as the remote. writeLocalState(t, ctx, b, map[string]int{"serial": 6}) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that the local state file has not been updated. 
localState := readLocalState(t, ctx, b) diff --git a/bundle/deploy/terraform/state_push.go b/bundle/deploy/terraform/state_push.go index f701db87d..b50983bd4 100644 --- a/bundle/deploy/terraform/state_push.go +++ b/bundle/deploy/terraform/state_push.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) @@ -20,21 +21,21 @@ func (l *statePush) Name() string { return "terraform:state-push" } -func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { +func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { f, err := l.filerFactory(b) if err != nil { - return err + return diag.FromErr(err) } dir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } // Expect the state file to live under dir. local, err := os.Open(filepath.Join(dir, TerraformStateFileName)) if err != nil { - return err + return diag.FromErr(err) } defer local.Close() @@ -43,7 +44,7 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "Writing local state file to remote state directory") err = f.Write(ctx, TerraformStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/deploy/terraform/state_push_test.go b/bundle/deploy/terraform/state_push_test.go index bd4514a5f..41d384900 100644 --- a/bundle/deploy/terraform/state_push_test.go +++ b/bundle/deploy/terraform/state_push_test.go @@ -56,6 +56,6 @@ func TestStatePush(t *testing.T) { // Write a stale local state file. writeLocalState(t, ctx, b, map[string]int{"serial": 4}) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) } diff --git a/bundle/deploy/terraform/unbind.go b/bundle/deploy/terraform/unbind.go index 74e15e184..49d65615e 100644 --- a/bundle/deploy/terraform/unbind.go +++ b/bundle/deploy/terraform/unbind.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/hashicorp/terraform-exec/tfexec" ) @@ -13,20 +14,20 @@ type unbind struct { resourceKey string } -func (m *unbind) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *unbind) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } err = tf.StateRm(ctx, fmt.Sprintf("%s.%s", m.resourceType, m.resourceKey)) if err != nil { - return fmt.Errorf("terraform state rm: %w", err) + return diag.Errorf("terraform state rm: %v", err) } return nil diff --git a/bundle/deploy/terraform/write.go b/bundle/deploy/terraform/write.go index e688f6a61..bee777ffe 100644 --- a/bundle/deploy/terraform/write.go +++ b/bundle/deploy/terraform/write.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" ) @@ -17,10 +18,10 @@ func (w *write) Name() string { return "terraform.Write" } -func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { 
+func (w *write) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { dir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } var root *schema.Root @@ -29,12 +30,12 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { return v, err }) if err != nil { - return err + return diag.FromErr(err) } f, err := os.Create(filepath.Join(dir, TerraformConfigFileName)) if err != nil { - return err + return diag.FromErr(err) } defer f.Close() @@ -43,7 +44,7 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { enc.SetIndent("", " ") err = enc.Encode(root) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/libraries/match.go b/bundle/libraries/match.go index c8fd2baec..d051e163c 100644 --- a/bundle/libraries/match.go +++ b/bundle/libraries/match.go @@ -2,9 +2,9 @@ package libraries import ( "context" - "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -19,17 +19,17 @@ func (a *match) Name() string { return "libraries.MatchWithArtifacts" } -func (a *match) Apply(ctx context.Context, b *bundle.Bundle) error { +func (a *match) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tasks := findAllTasks(b) for _, task := range tasks { if isMissingRequiredLibraries(task) { - return fmt.Errorf("task '%s' is missing required libraries. Please include your package code in task libraries block", task.TaskKey) + return diag.Errorf("task '%s' is missing required libraries. Please include your package code in task libraries block", task.TaskKey) } for j := range task.Libraries { lib := &task.Libraries[j] _, err := findArtifactFiles(ctx, lib, b) if err != nil { - return err + return diag.FromErr(err) } } } diff --git a/bundle/log_string.go b/bundle/log_string.go index 63800d6df..f14e3a3ad 100644 --- a/bundle/log_string.go +++ b/bundle/log_string.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" ) type LogStringMutator struct { @@ -20,7 +21,7 @@ func LogString(message string) Mutator { } } -func (m *LogStringMutator) Apply(ctx context.Context, b *Bundle) error { +func (m *LogStringMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { cmdio.LogString(ctx, m.message) return nil diff --git a/bundle/mutator.go b/bundle/mutator.go index bd1615fd7..6c9968aac 100644 --- a/bundle/mutator.go +++ b/bundle/mutator.go @@ -3,6 +3,7 @@ package bundle import ( "context" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -13,10 +14,10 @@ type Mutator interface { Name() string // Apply mutates the specified bundle object. - Apply(context.Context, *Bundle) error + Apply(context.Context, *Bundle) diag.Diagnostics } -func Apply(ctx context.Context, b *Bundle, m Mutator) error { +func Apply(ctx context.Context, b *Bundle, m Mutator) diag.Diagnostics { ctx = log.NewContext(ctx, log.GetLogger(ctx).With("mutator", m.Name())) log.Debugf(ctx, "Apply") @@ -24,7 +25,7 @@ func Apply(ctx context.Context, b *Bundle, m Mutator) error { err := b.Config.MarkMutatorEntry(ctx) if err != nil { log.Errorf(ctx, "entry error: %s", err) - return err + return diag.Errorf("entry error: %s", err) } defer func() { @@ -34,28 +35,32 @@ func Apply(ctx context.Context, b *Bundle, m Mutator) error { } }() - err = m.Apply(ctx, b) - if err != nil { + diags := m.Apply(ctx, b) + + // Log error in diagnostics if any. 
+ // Note: errors should be logged when constructing them + // such that they are not logged multiple times. + // If this is done, we can omit this block. + if err := diags.Error(); err != nil { log.Errorf(ctx, "Error: %s", err) - return err } - return nil + return diags } type funcMutator struct { - fn func(context.Context, *Bundle) error + fn func(context.Context, *Bundle) diag.Diagnostics } func (m funcMutator) Name() string { return "" } -func (m funcMutator) Apply(ctx context.Context, b *Bundle) error { +func (m funcMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { return m.fn(ctx, b) } // ApplyFunc applies an inline-specified function mutator. -func ApplyFunc(ctx context.Context, b *Bundle, fn func(context.Context, *Bundle) error) error { +func ApplyFunc(ctx context.Context, b *Bundle, fn func(context.Context, *Bundle) diag.Diagnostics) diag.Diagnostics { return Apply(ctx, b, funcMutator{fn}) } diff --git a/bundle/mutator_test.go b/bundle/mutator_test.go index c1f3c075f..04ff19cff 100644 --- a/bundle/mutator_test.go +++ b/bundle/mutator_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/databricks/cli/libs/diag" "github.com/stretchr/testify/assert" ) @@ -16,7 +17,7 @@ func (t *testMutator) Name() string { return "test" } -func (t *testMutator) Apply(ctx context.Context, b *Bundle) error { +func (t *testMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { t.applyCalled++ return Apply(ctx, b, Seq(t.nestedMutators...)) } @@ -35,8 +36,8 @@ func TestMutator(t *testing.T) { } b := &Bundle{} - err := Apply(context.Background(), b, m) - assert.NoError(t, err) + diags := Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) assert.Equal(t, 1, m.applyCalled) assert.Equal(t, 1, nested[0].applyCalled) diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go index f4834a656..6d39630c8 100644 --- a/bundle/permissions/filter.go +++ b/bundle/permissions/filter.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" ) @@ -59,10 +60,10 @@ func filter(currentUser string) dyn.WalkValueFunc { } } -func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { currentUser := b.Config.Workspace.CurrentUser.UserName - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { rv, err := dyn.Get(v, "resources") if err != nil { return dyn.InvalidValue, err @@ -77,4 +78,6 @@ func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error { // Set the resources with the filtered permissions back into the bundle return dyn.Set(v, "resources", nv) }) + + return diag.FromErr(err) } diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go index 07f5ae77d..410fa4be8 100644 --- a/bundle/permissions/filter_test.go +++ b/bundle/permissions/filter_test.go @@ -89,8 +89,8 @@ func testFixture(userName string) *bundle.Bundle { func TestFilterCurrentUser(t *testing.T) { b := testFixture("alice@databricks.com") - err := bundle.Apply(context.Background(), b, FilterCurrentUser()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, diags.Error()) // Assert current user is filtered out. 
assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) @@ -124,8 +124,8 @@ func TestFilterCurrentUser(t *testing.T) { func TestFilterCurrentServicePrincipal(t *testing.T) { b := testFixture("i-Robot") - err := bundle.Apply(context.Background(), b, FilterCurrentUser()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, diags.Error()) // Assert current user is filtered out. assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) @@ -169,6 +169,6 @@ func TestFilterCurrentUserDoesNotErrorWhenNoResources(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, FilterCurrentUser()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, diags.Error()) } diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index 54925d1c8..7787bc048 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) const CAN_MANAGE = "CAN_MANAGE" @@ -46,10 +47,10 @@ func ApplyBundlePermissions() bundle.Mutator { return &bundlePermissions{} } -func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := validate(b) if err != nil { - return err + return diag.FromErr(err) } applyForJobs(ctx, b) diff --git a/bundle/permissions/mutator_test.go b/bundle/permissions/mutator_test.go index 62c0589d3..438a15061 100644 --- a/bundle/permissions/mutator_test.go +++ b/bundle/permissions/mutator_test.go @@ -46,8 +46,8 @@ func TestApplyBundlePermissions(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) + require.NoError(t, diags.Error()) require.Len(t, b.Config.Resources.Jobs["job_1"].Permissions, 3) require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) @@ -123,8 +123,8 @@ func TestWarningOnOverlapPermission(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) + require.NoError(t, diags.Error()) require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_VIEW", UserName: "TestUser"}) require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go index a8eb9e278..a59a039f6 100644 --- a/bundle/permissions/workspace_root.go +++ b/bundle/permissions/workspace_root.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -16,10 +17,10 @@ func ApplyWorkspaceRootPermissions() bundle.Mutator { } // Apply implements bundle.Mutator. 
-func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) error { +func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := giveAccessForWorkspaceRoot(ctx, b) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/permissions/workspace_root_test.go b/bundle/permissions/workspace_root_test.go index 6f03204fa..7dd97b62d 100644 --- a/bundle/permissions/workspace_root_test.go +++ b/bundle/permissions/workspace_root_test.go @@ -69,6 +69,6 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) { WorkspaceObjectType: "directories", }).Return(nil, nil) - err := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions()) + require.NoError(t, diags.Error()) } diff --git a/bundle/phases/phase.go b/bundle/phases/phase.go index b594e1f62..1bb4f86a2 100644 --- a/bundle/phases/phase.go +++ b/bundle/phases/phase.go @@ -5,6 +5,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -26,7 +27,7 @@ func (p *phase) Name() string { return p.name } -func (p *phase) Apply(ctx context.Context, b *bundle.Bundle) error { +func (p *phase) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { log.Infof(ctx, "Phase: %s", p.Name()) return bundle.Apply(ctx, b, bundle.Seq(p.mutators...)) } diff --git a/bundle/python/conditional_transform_test.go b/bundle/python/conditional_transform_test.go index 4c7cad5c5..b4d7f9edb 100644 --- a/bundle/python/conditional_transform_test.go +++ b/bundle/python/conditional_transform_test.go @@ -47,8 +47,8 @@ func TestNoTransformByDefault(t *testing.T) { } trampoline := TransformWheelTask() - err := bundle.Apply(context.Background(), b, trampoline) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, trampoline) + require.NoError(t, diags.Error()) task := b.Config.Resources.Jobs["job1"].Tasks[0] require.NotNil(t, task.PythonWheelTask) @@ -96,8 +96,8 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { } trampoline := TransformWheelTask() - err := bundle.Apply(context.Background(), b, trampoline) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, trampoline) + require.NoError(t, diags.Error()) task := b.Config.Resources.Jobs["job1"].Tasks[0] require.Nil(t, task.PythonWheelTask) diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index b6427ccd8..729efe1a9 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -140,6 +140,6 @@ func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { }, } trampoline := TransformWheelTask() - err := bundle.Apply(context.Background(), b, trampoline) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, trampoline) + require.NoError(t, diags.Error()) } diff --git a/bundle/python/warning.go b/bundle/python/warning.go index 9b9fd8e59..060509ad3 100644 --- a/bundle/python/warning.go +++ b/bundle/python/warning.go @@ -2,11 +2,11 @@ package python import ( "context" - "fmt" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "golang.org/x/mod/semver" @@ -19,13 +19,13 @@ func WrapperWarning() bundle.Mutator { return &wrapperWarning{} } -func (m *wrapperWarning) 
Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if isPythonWheelWrapperOn(b) { return nil } if hasIncompatibleWheelTasks(ctx, b) { - return fmt.Errorf("python wheel tasks with local libraries require compute with DBR 13.1+. Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") + return diag.Errorf("python wheel tasks with local libraries require compute with DBR 13.1+. Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") } return nil } diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index c8dde59ec..f1fdf0bcf 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -101,8 +101,8 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) { require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) - err := bundle.Apply(context.Background(), b, WrapperWarning()) - require.ErrorContains(t, err, "python wheel tasks with local libraries require compute with DBR 13.1+.") + diags := bundle.Apply(context.Background(), b, WrapperWarning()) + require.ErrorContains(t, diags.Error(), "python wheel tasks with local libraries require compute with DBR 13.1+.") } func TestIncompatibleWheelTasksWithExistingClusterId(t *testing.T) { @@ -280,8 +280,8 @@ func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, WrapperWarning()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, WrapperWarning()) + require.NoError(t, diags.Error()) } func TestSparkVersionLowerThanExpected(t *testing.T) { diff --git a/bundle/scripts/scripts.go b/bundle/scripts/scripts.go index 2f13bc19f..f8ed7d6a3 100644 --- a/bundle/scripts/scripts.go +++ b/bundle/scripts/scripts.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/exec" "github.com/databricks/cli/libs/log" ) @@ -28,15 +29,15 @@ func (m *script) Name() string { return fmt.Sprintf("scripts.%s", m.scriptHook) } -func (m *script) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *script) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { executor, err := exec.NewCommandExecutor(b.Config.Path) if err != nil { - return err + return diag.FromErr(err) } cmd, out, err := executeHook(ctx, executor, b, m.scriptHook) if err != nil { - return err + return diag.FromErr(err) } if cmd == nil { log.Debugf(ctx, "No script defined for %s, skipping", m.scriptHook) @@ -52,7 +53,7 @@ func (m *script) Apply(ctx context.Context, b *bundle.Bundle) error { line, err = reader.ReadString('\n') } - return cmd.Wait() + return diag.FromErr(cmd.Wait()) } func executeHook(ctx context.Context, executor *exec.Executor, b *bundle.Bundle, hook config.ScriptHook) (exec.Command, io.Reader, error) { diff --git a/bundle/scripts/scripts_test.go b/bundle/scripts/scripts_test.go index bc3202e06..fa5c23970 100644 --- a/bundle/scripts/scripts_test.go +++ b/bundle/scripts/scripts_test.go @@ -46,6 +46,6 @@ func TestExecuteMutator(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, Execute(config.ScriptPreInit)) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, Execute(config.ScriptPreInit)) + require.NoError(t, diags.Error()) } diff --git a/bundle/seq.go 
b/bundle/seq.go index 89e760d1f..c1260a3f0 100644 --- a/bundle/seq.go +++ b/bundle/seq.go @@ -1,6 +1,10 @@ package bundle -import "context" +import ( + "context" + + "github.com/databricks/cli/libs/diag" +) type seqMutator struct { mutators []Mutator @@ -10,14 +14,15 @@ func (s *seqMutator) Name() string { return "seq" } -func (s *seqMutator) Apply(ctx context.Context, b *Bundle) error { +func (s *seqMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { + var diags diag.Diagnostics for _, m := range s.mutators { - err := Apply(ctx, b, m) - if err != nil { - return err + diags = diags.Extend(Apply(ctx, b, m)) + if diags.HasError() { + break } } - return nil + return diags } func Seq(ms ...Mutator) Mutator { diff --git a/bundle/seq_test.go b/bundle/seq_test.go index d5c229e3c..74f975ed8 100644 --- a/bundle/seq_test.go +++ b/bundle/seq_test.go @@ -14,8 +14,8 @@ func TestSeqMutator(t *testing.T) { seqMutator := Seq(m1, m2, m3) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.NoError(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.NoError(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) @@ -30,8 +30,8 @@ func TestSeqWithDeferredMutator(t *testing.T) { seqMutator := Seq(m1, Defer(m2, m3), m4) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.NoError(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.NoError(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) @@ -47,8 +47,8 @@ func TestSeqWithErrorAndDeferredMutator(t *testing.T) { seqMutator := Seq(errorMut, Defer(m1, m2), m3) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.Error(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.Error(t, diags.Error()) assert.Equal(t, 1, errorMut.applyCalled) assert.Equal(t, 0, m1.applyCalled) @@ -64,8 +64,8 @@ func TestSeqWithErrorInsideDeferredMutator(t *testing.T) { seqMutator := Seq(m1, Defer(errorMut, m2), m3) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.Error(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.Error(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, errorMut.applyCalled) @@ -81,8 +81,8 @@ func TestSeqWithErrorInsideFinallyStage(t *testing.T) { seqMutator := Seq(m1, Defer(m2, errorMut), m3) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.Error(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.Error(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) diff --git a/bundle/tests/bundle_permissions_test.go b/bundle/tests/bundle_permissions_test.go index 3ea9dc2e0..b55cbdd2b 100644 --- a/bundle/tests/bundle_permissions_test.go +++ b/bundle/tests/bundle_permissions_test.go @@ -18,8 +18,9 @@ func TestBundlePermissions(t *testing.T) { assert.NotContains(t, b.Config.Permissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) assert.NotContains(t, b.Config.Permissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) - err := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) + require.NoError(t, diags.Error()) + pipelinePermissions := b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Permissions assert.Contains(t, pipelinePermissions, 
resources.Permission{Level: "CAN_RUN", UserName: "test@company.com"}) assert.NotContains(t, pipelinePermissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) @@ -40,8 +41,9 @@ func TestBundlePermissionsDevTarget(t *testing.T) { assert.Contains(t, b.Config.Permissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) assert.Contains(t, b.Config.Permissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) - err := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) + require.NoError(t, diags.Error()) + pipelinePermissions := b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Permissions assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_RUN", UserName: "test@company.com"}) assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) diff --git a/bundle/tests/conflicting_resource_ids_test.go b/bundle/tests/conflicting_resource_ids_test.go index 704683ad5..16dd1c33a 100644 --- a/bundle/tests/conflicting_resource_ids_test.go +++ b/bundle/tests/conflicting_resource_ids_test.go @@ -23,18 +23,18 @@ func TestConflictingResourceIdsOneSubconfig(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, "./conflicting_resource_ids/one_subconfiguration") require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/databricks.yml") resourcesConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/resources.yml") - assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, resourcesConfigPath)) + assert.ErrorContains(t, diags.Error(), fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, resourcesConfigPath)) } func TestConflictingResourceIdsTwoSubconfigs(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, "./conflicting_resource_ids/two_subconfigurations") require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) resources1ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources1.yml") resources2ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources2.yml") - assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", resources1ConfigPath, resources2ConfigPath)) + assert.ErrorContains(t, diags.Error(), fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", resources1ConfigPath, resources2ConfigPath)) } diff --git a/bundle/tests/git_test.go b/bundle/tests/git_test.go index c5ae83a20..b33ffc211 100644 --- a/bundle/tests/git_test.go +++ b/bundle/tests/git_test.go @@ -34,6 +34,6 @@ func TestGitBundleBranchValidation(t *testing.T) { assert.Equal(t, "feature-a", b.Config.Bundle.Git.Branch) assert.Equal(t, "feature-b", b.Config.Bundle.Git.ActualBranch) - err := bundle.Apply(context.Background(), b, mutator.ValidateGitDetails()) - assert.ErrorContains(t, err, "not on the right Git branch:") + diags := bundle.Apply(context.Background(), b, mutator.ValidateGitDetails()) + 
assert.ErrorContains(t, diags.Error(), "not on the right Git branch:") } diff --git a/bundle/tests/include_test.go b/bundle/tests/include_test.go index eb09d1aa0..fd8ae7198 100644 --- a/bundle/tests/include_test.go +++ b/bundle/tests/include_test.go @@ -17,9 +17,9 @@ func TestIncludeInvalid(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, "./include_invalid") require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) - require.Error(t, err) - assert.Contains(t, err.Error(), "notexists.yml defined in 'include' section does not match any files") + diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + require.Error(t, diags.Error()) + assert.ErrorContains(t, diags.Error(), "notexists.yml defined in 'include' section does not match any files") } func TestIncludeWithGlob(t *testing.T) { diff --git a/bundle/tests/interpolation_test.go b/bundle/tests/interpolation_test.go index a9659d33f..920b9000d 100644 --- a/bundle/tests/interpolation_test.go +++ b/bundle/tests/interpolation_test.go @@ -12,23 +12,22 @@ import ( func TestInterpolation(t *testing.T) { b := load(t, "./interpolation") - err := bundle.Apply(context.Background(), b, mutator.ResolveVariableReferences( + diags := bundle.Apply(context.Background(), b, mutator.ResolveVariableReferences( "bundle", "workspace", )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "foo bar", b.Config.Bundle.Name) assert.Equal(t, "foo bar | bar", b.Config.Resources.Jobs["my_job"].Name) } func TestInterpolationWithTarget(t *testing.T) { b := loadTarget(t, "./interpolation_target", "development") - err := bundle.Apply(context.Background(), b, mutator.ResolveVariableReferences( + diags := bundle.Apply(context.Background(), b, mutator.ResolveVariableReferences( "bundle", "workspace", )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "foo bar", b.Config.Bundle.Name) assert.Equal(t, "foo bar | bar | development | development", b.Config.Resources.Jobs["my_job"].Name) - } diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index 3a28d822a..228763ce9 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -13,8 +13,8 @@ func load(t *testing.T, path string) *bundle.Bundle { ctx := context.Background() b, err := bundle.Load(ctx, path) require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + require.NoError(t, diags.Error()) return b } @@ -22,14 +22,14 @@ func loadTarget(t *testing.T, path, env string) *bundle.Bundle { ctx := context.Background() b, err := bundle.Load(ctx, path) require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget(env)...)) - require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq( + diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget(env)...)) + require.NoError(t, diags.Error()) + diags = bundle.Apply(ctx, b, bundle.Seq( mutator.RewriteSyncPaths(), mutator.MergeJobClusters(), mutator.MergeJobTasks(), mutator.MergePipelineClusters(), )) - require.NoError(t, err) + require.NoError(t, diags.Error()) return b } diff --git a/bundle/tests/path_translation_test.go b/bundle/tests/path_translation_test.go index 6c3393450..05702d2a2 100644 --- a/bundle/tests/path_translation_test.go +++ b/bundle/tests/path_translation_test.go @@ -15,8 +15,8 @@ func TestPathTranslationFallback(t *testing.T) { b := 
loadTarget(t, "./path_translation/fallback", "development") m := mutator.TranslatePaths() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) j := b.Config.Resources.Jobs["my_job"] assert.Len(t, j.Tasks, 6) @@ -54,16 +54,16 @@ func TestPathTranslationFallbackError(t *testing.T) { b := loadTarget(t, "./path_translation/fallback", "error") m := mutator.TranslatePaths() - err := bundle.Apply(context.Background(), b, m) - assert.ErrorContains(t, err, `notebook this value is overridden not found`) + diags := bundle.Apply(context.Background(), b, m) + assert.ErrorContains(t, diags.Error(), `notebook this value is overridden not found`) } func TestPathTranslationNominal(t *testing.T) { b := loadTarget(t, "./path_translation/nominal", "development") m := mutator.TranslatePaths() - err := bundle.Apply(context.Background(), b, m) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) j := b.Config.Resources.Jobs["my_job"] assert.Len(t, j.Tasks, 8) @@ -107,6 +107,6 @@ func TestPathTranslationNominalError(t *testing.T) { b := loadTarget(t, "./path_translation/nominal", "error") m := mutator.TranslatePaths() - err := bundle.Apply(context.Background(), b, m) - assert.ErrorContains(t, err, `notebook this value is overridden not found`) + diags := bundle.Apply(context.Background(), b, m) + assert.ErrorContains(t, diags.Error(), `notebook this value is overridden not found`) } diff --git a/bundle/tests/pipeline_glob_paths_test.go b/bundle/tests/pipeline_glob_paths_test.go index 85a137926..bf5039b5f 100644 --- a/bundle/tests/pipeline_glob_paths_test.go +++ b/bundle/tests/pipeline_glob_paths_test.go @@ -27,8 +27,8 @@ func TestExpandPipelineGlobPaths(t *testing.T) { b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - err := bundle.Apply(ctx, b, phases.Initialize()) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, phases.Initialize()) + require.NoError(t, diags.Error()) require.Equal( t, "/Users/user@domain.com/.bundle/pipeline_glob_paths/default/files/dlt/nyc_taxi_loader", @@ -50,6 +50,6 @@ func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { b.SetWorkpaceClient(m.WorkspaceClient) ctx := context.Background() - err := bundle.Apply(ctx, b, phases.Initialize()) - require.ErrorContains(t, err, "notebook ./non-existent not found") + diags := bundle.Apply(ctx, b, phases.Initialize()) + require.ErrorContains(t, diags.Error(), "notebook ./non-existent not found") } diff --git a/bundle/tests/python_wheel_test.go b/bundle/tests/python_wheel_test.go index 8351e96ae..c44e80a57 100644 --- a/bundle/tests/python_wheel_test.go +++ b/bundle/tests/python_wheel_test.go @@ -17,16 +17,16 @@ func TestPythonWheelBuild(t *testing.T) { require.NoError(t, err) m := phases.Build() - err = bundle.Apply(ctx, b, m) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + require.NoError(t, diags.Error()) matches, err := filepath.Glob("./python_wheel/python_wheel/my_test_code/dist/my_test_code-*.whl") require.NoError(t, err) require.Equal(t, 1, len(matches)) match := libraries.MatchWithArtifacts() - err = bundle.Apply(ctx, b, match) - require.NoError(t, err) + diags = bundle.Apply(ctx, b, match) + require.NoError(t, diags.Error()) } func TestPythonWheelBuildAutoDetect(t *testing.T) { @@ -35,16 +35,16 @@ func TestPythonWheelBuildAutoDetect(t *testing.T) { require.NoError(t, err) m := phases.Build() - err = bundle.Apply(ctx, b, m) - 
require.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + require.NoError(t, diags.Error()) matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact/dist/my_test_code-*.whl") require.NoError(t, err) require.Equal(t, 1, len(matches)) match := libraries.MatchWithArtifacts() - err = bundle.Apply(ctx, b, match) - require.NoError(t, err) + diags = bundle.Apply(ctx, b, match) + require.NoError(t, diags.Error()) } func TestPythonWheelWithDBFSLib(t *testing.T) { @@ -53,12 +53,12 @@ func TestPythonWheelWithDBFSLib(t *testing.T) { require.NoError(t, err) m := phases.Build() - err = bundle.Apply(ctx, b, m) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + require.NoError(t, diags.Error()) match := libraries.MatchWithArtifacts() - err = bundle.Apply(ctx, b, match) - require.NoError(t, err) + diags = bundle.Apply(ctx, b, match) + require.NoError(t, diags.Error()) } func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { @@ -67,12 +67,12 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { require.NoError(t, err) m := phases.Build() - err = bundle.Apply(ctx, b, m) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + require.NoError(t, diags.Error()) match := libraries.MatchWithArtifacts() - err = bundle.Apply(ctx, b, match) - require.ErrorContains(t, err, "./non-existing/*.whl") + diags = bundle.Apply(ctx, b, match) + require.ErrorContains(t, diags.Error(), "./non-existing/*.whl") require.NotZero(t, len(b.Config.Artifacts)) diff --git a/bundle/tests/relative_path_with_includes_test.go b/bundle/tests/relative_path_with_includes_test.go index 1d1f321d4..6e13628be 100644 --- a/bundle/tests/relative_path_with_includes_test.go +++ b/bundle/tests/relative_path_with_includes_test.go @@ -14,8 +14,8 @@ func TestRelativePathsWithIncludes(t *testing.T) { b := loadTarget(t, "./relative_path_with_includes", "default") m := mutator.TranslatePaths() - err := bundle.Apply(context.Background(), b, m) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) assert.Equal(t, "artifact_a", b.Config.Artifacts["test_a"].Path) assert.Equal(t, filepath.Join("subfolder", "artifact_b"), b.Config.Artifacts["test_b"].Path) diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 98aaf6358..321bb5130 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/stretchr/testify/assert" ) @@ -15,7 +16,7 @@ func TestRunAsDefault(t *testing.T) { b := load(t, "./run_as") ctx := context.Background() - bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { b.Config.Workspace.CurrentUser = &config.User{ User: &iam.User{ UserName: "jane@doe.com", @@ -24,8 +25,8 @@ func TestRunAsDefault(t *testing.T) { return nil }) - err := bundle.Apply(ctx, b, mutator.SetRunAs()) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) assert.Len(t, b.Config.Resources.Jobs, 3) jobs := b.Config.Resources.Jobs @@ -55,7 +56,7 @@ func TestRunAsDevelopment(t *testing.T) { b := loadTarget(t, "./run_as", "development") ctx := context.Background() - bundle.ApplyFunc(ctx, b, func(ctx context.Context, b 
*bundle.Bundle) error { + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { b.Config.Workspace.CurrentUser = &config.User{ User: &iam.User{ UserName: "jane@doe.com", @@ -64,8 +65,8 @@ func TestRunAsDevelopment(t *testing.T) { return nil }) - err := bundle.Apply(ctx, b, mutator.SetRunAs()) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) assert.Len(t, b.Config.Resources.Jobs, 3) jobs := b.Config.Resources.Jobs diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 05314a846..fde36344f 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -13,97 +13,97 @@ import ( func TestVariables(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "def") b := load(t, "./variables/vanilla") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SetVariables(), mutator.ResolveVariableReferences( "variables", ), )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "abc def", b.Config.Bundle.Name) } func TestVariablesLoadingFailsWhenRequiredVariableIsNotSpecified(t *testing.T) { b := load(t, "./variables/vanilla") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SetVariables(), mutator.ResolveVariableReferences( "variables", ), )) - assert.ErrorContains(t, err, "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") + assert.ErrorContains(t, diags.Error(), "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") } func TestVariablesTargetsBlockOverride(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-single-variable-override"), mutator.SetVariables(), mutator.ResolveVariableReferences( "variables", ), )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "default-a dev-b", b.Config.Workspace.Profile) } func TestVariablesTargetsBlockOverrideForMultipleVariables(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-two-variable-overrides"), mutator.SetVariables(), mutator.ResolveVariableReferences( "variables", ), )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "prod-a prod-b", b.Config.Workspace.Profile) } func TestVariablesTargetsBlockOverrideWithProcessEnvVars(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "env-var-b") b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-two-variable-overrides"), mutator.SetVariables(), mutator.ResolveVariableReferences( "variables", ), )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "prod-a env-var-b", b.Config.Workspace.Profile) } func TestVariablesTargetsBlockOverrideWithMissingVariables(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := 
bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-missing-a-required-variable-assignment"), mutator.SetVariables(), mutator.ResolveVariableReferences( "variables", ), )) - assert.ErrorContains(t, err, "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") + assert.ErrorContains(t, diags.Error(), "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") } func TestVariablesTargetsBlockOverrideWithUndefinedVariables(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-using-an-undefined-variable"), mutator.SetVariables(), mutator.ResolveVariableReferences( "variables", ), )) - assert.ErrorContains(t, err, "variable c is not defined but is assigned a value") + assert.ErrorContains(t, diags.Error(), "variable c is not defined but is assigned a value") } func TestVariablesWithoutDefinition(t *testing.T) { t.Setenv("BUNDLE_VAR_a", "foo") t.Setenv("BUNDLE_VAR_b", "bar") b := load(t, "./variables/without_definition") - err := bundle.Apply(context.Background(), b, mutator.SetVariables()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) + require.NoError(t, diags.Error()) require.True(t, b.Config.Variables["a"].HasValue()) require.True(t, b.Config.Variables["b"].HasValue()) assert.Equal(t, "foo", *b.Config.Variables["a"].Value) @@ -112,11 +112,11 @@ func TestVariablesWithoutDefinition(t *testing.T) { func TestVariablesWithTargetLookupOverrides(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-overrides-lookup"), mutator.SetVariables(), )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "cluster: some-test-cluster", b.Config.Variables["d"].Lookup.String()) assert.Equal(t, "instance-pool: some-test-instance-pool", b.Config.Variables["e"].Lookup.String()) } diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 0ba8a187a..8b8cb9f2e 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" "github.com/spf13/cobra" ) @@ -31,7 +32,7 @@ func newDeployCommand() *cobra.Command { ctx := cmd.Context() b := bundle.Get(ctx) - bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) error { + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { b.Config.Bundle.Force = force b.Config.Bundle.Deployment.Lock.Force = forceLock if cmd.Flag("compute-id").Changed { @@ -45,11 +46,15 @@ func newDeployCommand() *cobra.Command { return nil }) - return bundle.Apply(ctx, b, bundle.Seq( + diags := bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Build(), phases.Deploy(), )) + if err := diags.Error(); err != nil { + return err + } + return nil } return cmd diff --git a/cmd/bundle/deployment/bind.go b/cmd/bundle/deployment/bind.go index 184cac1d1..11c560b12 100644 --- a/cmd/bundle/deployment/bind.go +++ b/cmd/bundle/deployment/bind.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/cmd/bundle/utils" 
"github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/spf13/cobra" ) @@ -44,12 +45,12 @@ func newBindCommand() *cobra.Command { return fmt.Errorf("%s with an id '%s' is not found", resource.TerraformResourceName(), args[1]) } - bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) error { + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { b.Config.Bundle.Deployment.Lock.Force = forceLock return nil }) - err = bundle.Apply(ctx, b, bundle.Seq( + diags := bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Bind(&terraform.BindOptions{ AutoApprove: autoApprove, @@ -58,7 +59,7 @@ func newBindCommand() *cobra.Command { ResourceId: args[1], }), )) - if err != nil { + if err := diags.Error(); err != nil { return fmt.Errorf("failed to bind the resource, err: %w", err) } diff --git a/cmd/bundle/deployment/unbind.go b/cmd/bundle/deployment/unbind.go index b5fb69200..76727877f 100644 --- a/cmd/bundle/deployment/unbind.go +++ b/cmd/bundle/deployment/unbind.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" "github.com/spf13/cobra" ) @@ -29,15 +30,19 @@ func newUnbindCommand() *cobra.Command { return err } - bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) error { + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { b.Config.Bundle.Deployment.Lock.Force = forceLock return nil }) - return bundle.Apply(cmd.Context(), b, bundle.Seq( + diags := bundle.Apply(cmd.Context(), b, bundle.Seq( phases.Initialize(), phases.Unbind(resource.TerraformResourceName(), args[0]), )) + if err := diags.Error(); err != nil { + return err + } + return nil } return cmd diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index dc5ea45f8..38b717713 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" "golang.org/x/term" @@ -32,7 +33,7 @@ func newDestroyCommand() *cobra.Command { ctx := cmd.Context() b := bundle.Get(ctx) - bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // If `--force-lock` is specified, force acquisition of the deployment lock. 
b.Config.Bundle.Deployment.Lock.Force = forceDestroy @@ -57,11 +58,15 @@ func newDestroyCommand() *cobra.Command { return fmt.Errorf("please specify --auto-approve since selected logging format is json") } - return bundle.Apply(ctx, b, bundle.Seq( + diags := bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Build(), phases.Destroy(), )) + if err := diags.Error(); err != nil { + return err + } + return nil } return cmd diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 9b4ad5c8d..87ea8610c 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -35,14 +35,14 @@ func newRunCommand() *cobra.Command { ctx := cmd.Context() b := bundle.Get(ctx) - err := bundle.Apply(ctx, b, bundle.Seq( + diags := bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), terraform.Interpolate(), terraform.Write(), terraform.StatePull(), terraform.Load(terraform.ErrorOnEmptyState), )) - if err != nil { + if err := diags.Error(); err != nil { return err } diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index 68354a0a2..a28ceede9 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -33,8 +33,8 @@ func newSummaryCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { b := bundle.Get(cmd.Context()) - err := bundle.Apply(cmd.Context(), b, phases.Initialize()) - if err != nil { + diags := bundle.Apply(cmd.Context(), b, phases.Initialize()) + if err := diags.Error(); err != nil { return err } @@ -47,18 +47,18 @@ func newSummaryCommand() *cobra.Command { noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist) if forcePull || noCache { - err = bundle.Apply(cmd.Context(), b, bundle.Seq( + diags = bundle.Apply(cmd.Context(), b, bundle.Seq( terraform.StatePull(), terraform.Interpolate(), terraform.Write(), )) - if err != nil { + if err := diags.Error(); err != nil { return err } } - err = bundle.Apply(cmd.Context(), b, terraform.Load()) - if err != nil { + diags = bundle.Apply(cmd.Context(), b, terraform.Load()) + if err := diags.Error(); err != nil { return err } diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index 0b7ab4473..0b7f9b3a9 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -49,8 +49,8 @@ func newSyncCommand() *cobra.Command { b := bundle.Get(cmd.Context()) // Run initialize phase to make sure paths are set. 
- err := bundle.Apply(cmd.Context(), b, phases.Initialize()) - if err != nil { + diags := bundle.Apply(cmd.Context(), b, phases.Initialize()) + if err := diags.Error(); err != nil { return err } diff --git a/cmd/bundle/utils/utils.go b/cmd/bundle/utils/utils.go index e900f47c3..e53a40b9d 100644 --- a/cmd/bundle/utils/utils.go +++ b/cmd/bundle/utils/utils.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" "github.com/spf13/cobra" ) @@ -22,7 +23,9 @@ func ConfigureBundleWithVariables(cmd *cobra.Command, args []string) error { // Initialize variables by assigning them values passed as command line flags b := bundle.Get(cmd.Context()) - return bundle.ApplyFunc(cmd.Context(), b, func(ctx context.Context, b *bundle.Bundle) error { - return b.Config.InitializeVariables(variables) + diags := bundle.ApplyFunc(cmd.Context(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.InitializeVariables(variables) + return diag.FromErr(err) }) + return diags.Error() } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index a650fcfde..42686b328 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -22,8 +22,8 @@ func newValidateCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { b := bundle.Get(cmd.Context()) - err := bundle.Apply(cmd.Context(), b, phases.Initialize()) - if err != nil { + diags := bundle.Apply(cmd.Context(), b, phases.Initialize()) + if err := diags.Error(); err != nil { return err } diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index edfc1f431..6a6aeb4d2 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/libs/diag" envlib "github.com/databricks/cli/libs/env" "github.com/spf13/cobra" "golang.org/x/exp/maps" @@ -64,17 +65,17 @@ func loadBundle(cmd *cobra.Command, args []string, load func(ctx context.Context profile := getProfile(cmd) if profile != "" { - err = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { b.Config.Workspace.Profile = profile return nil }) - if err != nil { + if err := diags.Error(); err != nil { return nil, err } } - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) - if err != nil { + diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + if err := diags.Error(); err != nil { return nil, err } @@ -102,8 +103,8 @@ func configureBundle(cmd *cobra.Command, args []string, load func(ctx context.Co } ctx := cmd.Context() - err = bundle.Apply(ctx, b, m) - if err != nil { + diags := bundle.Apply(ctx, b, m) + if err := diags.Error(); err != nil { return err } diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 0f3769ece..2ced12fdd 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -74,8 +74,8 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { }, } - err := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + require.NoError(t, diags.Error()) // The remote path attribute on the artifact file should have been set. 
require.Regexp(t, diff --git a/libs/diag/diagnostic.go b/libs/diag/diagnostic.go index 02d2e7c17..68b4ad611 100644 --- a/libs/diag/diagnostic.go +++ b/libs/diag/diagnostic.go @@ -32,6 +32,19 @@ func Errorf(format string, args ...any) Diagnostics { } } +// FromErr returns a new error diagnostic from the specified error, if any. +func FromErr(err error) Diagnostics { + if err == nil { + return nil + } + return []Diagnostic{ + { + Severity: Error, + Summary: err.Error(), + }, + } +} + // Warningf creates a new warning diagnostic. func Warningf(format string, args ...any) Diagnostics { return []Diagnostic{ @@ -74,3 +87,13 @@ func (ds Diagnostics) HasError() bool { } return false } + +// Return first error in the set of diagnostics. +func (ds Diagnostics) Error() error { + for _, d := range ds { + if d.Severity == Error { + return fmt.Errorf(d.Summary) + } + } + return nil +} diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index dc287440c..cad58a532 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -17,6 +17,7 @@ import ( "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" "github.com/databricks/databricks-sdk-go" workspaceConfig "github.com/databricks/databricks-sdk-go/config" @@ -69,7 +70,7 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri require.NoError(t, err) // Apply initialize / validation mutators - bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) error { + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { b.Config.Workspace.CurrentUser = &bundleConfig.User{User: cachedUser} return nil }) @@ -79,17 +80,17 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri b.Config.Bundle.Terraform = &bundleConfig.Terraform{ ExecPath: "sh", } - err = bundle.Apply(ctx, b, bundle.Seq( + diags := bundle.Apply(ctx, b, bundle.Seq( bundle.Seq(mutator.DefaultMutators()...), mutator.SelectTarget(target), phases.Initialize(), )) - require.NoError(t, err) + require.NoError(t, diags.Error()) // Apply build mutator if build { - err = bundle.Apply(ctx, b, phases.Build()) - require.NoError(t, err) + diags = bundle.Apply(ctx, b, phases.Build()) + require.NoError(t, diags.Error()) } } From e3717ba1c43cc423bac5dae8c17489cfebfbb4c3 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 26 Mar 2024 08:57:48 +0100 Subject: [PATCH 097/286] Fix flaky test in `libs/process` (#1314) ## Changes The order of stdout and stderr being read into the buffer for combined output is not deterministic due to scheduling of the underlying goroutines that consume them. That's why this asserts on the contents and not the order. 
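For illustration, here is a minimal, self-contained sketch (not part of this patch; the `splitLines` helper mirrors the one added to the test below) of why the assertion compares the set of lines rather than their order: two goroutines append to the same buffer, so the byte order varies between runs while the line contents do not.

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"sort"
	"sync"
)

// splitLines collects non-empty lines, mirroring the helper added in this test.
func splitLines(b []byte) (lines []string) {
	scan := bufio.NewScanner(bytes.NewReader(b))
	for scan.Scan() {
		if line := scan.Text(); line != "" {
			lines = append(lines, line)
		}
	}
	return lines
}

func main() {
	var (
		mu  sync.Mutex
		buf bytes.Buffer
		wg  sync.WaitGroup
	)

	// Two writers race to append their line, standing in for the goroutines
	// that drain stdout and stderr into the combined output buffer.
	for _, line := range []string{"1", "2"} {
		wg.Add(1)
		go func(line string) {
			defer wg.Done()
			mu.Lock()
			defer mu.Unlock()
			fmt.Fprintln(&buf, line)
		}(line)
	}
	wg.Wait()

	// The byte order in buf ("1\n2\n" or "2\n1\n") depends on scheduling;
	// the set of lines does not, so that is what the test asserts on.
	lines := splitLines(buf.Bytes())
	sort.Strings(lines)
	fmt.Println(lines) // always [1 2]
}
```

Asserting with `assert.ElementsMatch` achieves the same order-insensitive comparison without the explicit sort.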
--- libs/process/background_test.go | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/libs/process/background_test.go b/libs/process/background_test.go index 5bf2400bc..2ee6727a0 100644 --- a/libs/process/background_test.go +++ b/libs/process/background_test.go @@ -1,6 +1,7 @@ package process import ( + "bufio" "bytes" "context" "fmt" @@ -12,6 +13,17 @@ import ( "github.com/stretchr/testify/assert" ) +func splitLines(b []byte) (lines []string) { + scan := bufio.NewScanner(bytes.NewReader(b)) + for scan.Scan() { + line := scan.Text() + if line != "" { + lines = append(lines, line) + } + } + return lines +} + func TestBackgroundUnwrapsNotFound(t *testing.T) { ctx := context.Background() _, err := Background(ctx, []string{"/bin/meeecho", "1"}) @@ -46,7 +58,12 @@ func TestBackgroundCombinedOutput(t *testing.T) { }, WithCombinedOutput(&buf)) assert.NoError(t, err) assert.Equal(t, "2", strings.TrimSpace(res)) - assert.Equal(t, "1\n2\n", strings.ReplaceAll(buf.String(), "\r", "")) + + // The order of stdout and stderr being read into the buffer + // for combined output is not deterministic due to scheduling + // of the underlying goroutines that consume them. + // That's why this asserts on the contents and not the order. + assert.ElementsMatch(t, []string{"1", "2"}, splitLines(buf.Bytes())) } func TestBackgroundCombinedOutputFailure(t *testing.T) { @@ -66,10 +83,7 @@ func TestBackgroundCombinedOutputFailure(t *testing.T) { assert.Equal(t, "2", strings.TrimSpace(processErr.Stdout)) } assert.Equal(t, "2", strings.TrimSpace(res)) - - out := strings.ReplaceAll(buf.String(), "\r", "") - assert.Contains(t, out, "1\n") - assert.Contains(t, out, "2\n") + assert.ElementsMatch(t, []string{"1", "2"}, splitLines(buf.Bytes())) } func TestBackgroundNoStdin(t *testing.T) { From b50380471ed3661b25d7c4fe4f37ca433b8d45fe Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 26 Mar 2024 18:32:09 +0530 Subject: [PATCH 098/286] Allow unknown properties in the config file for template initialization (#1315) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes Before we would error if a property was defined in the config file, that was not defined in the schema. ## Tests Unit tests. Also manually that the e2e flow works file. Before: ``` shreyas.goenka@THW32HFW6T playground % cli bundle init default-python --config-file config.json Welcome to the default Python template for Databricks Asset Bundles! Error: failed to load config from file config.json: property include_pytho is not defined in the schema ``` After: ``` shreyas.goenka@THW32HFW6T playground % cli bundle init default-python --config-file config.json Welcome to the default Python template for Databricks Asset Bundles! Workspace to use (auto-detected, edit in 'test/databricks.yml'): https://dbc-a39a1eb1-ef95.cloud.databricks.com ✨ Your new project has been created in the 'test' directory! Please refer to the README.md file for "getting started" instructions. See also the documentation at https://docs.databricks.com/dev-tools/bundles/index.html. 
``` --- libs/template/config.go | 13 ++++++++++++- libs/template/config_test.go | 11 +++++++++++ .../config.json | 3 ++- 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/libs/template/config.go b/libs/template/config.go index 970e74ca9..5470aefeb 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -70,8 +70,14 @@ func validateSchema(schema *jsonschema.Schema) error { // Reads json file at path and assigns values from the file func (c *config) assignValuesFromFile(path string) error { - // Load the config file. + // It's valid to set additional properties in the config file that are not + // defined in the schema. They will be filtered below. Thus for the duration of + // the LoadInstance call, we disable the additional properties check, + // to allow those properties to be loaded. + c.schema.AdditionalProperties = true configFromFile, err := c.schema.LoadInstance(path) + c.schema.AdditionalProperties = false + if err != nil { return fmt.Errorf("failed to load config from file %s: %w", path, err) } @@ -79,6 +85,11 @@ func (c *config) assignValuesFromFile(path string) error { // Write configs from the file to the input map, not overwriting any existing // configurations. for name, val := range configFromFile { + // If a property is not defined in the schema, skip it. + if _, ok := c.schema.Properties[name]; !ok { + continue + } + // If a value is already assigned, keep the original value. if _, ok := c.values[name]; ok { continue } diff --git a/libs/template/config_test.go b/libs/template/config_test.go index 847c2615b..1af2e5f5a 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -52,6 +52,17 @@ func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *te assert.Equal(t, "this-is-not-overwritten", c.values["string_val"]) } +func TestTemplateConfigAssignValuesFromFileFiltersPropertiesNotInTheSchema(t *testing.T) { + c := testConfig(t) + + err := c.assignValuesFromFile("./testdata/config-assign-from-file-unknown-property/config.json") + assert.NoError(t, err) + + // assert only the known property is loaded + assert.Len(t, c.values, 1) + assert.Equal(t, "i am a known property", c.values["string_val"]) +} + func TestTemplateConfigAssignDefaultValues(t *testing.T) { c := testConfig(t) diff --git a/libs/template/testdata/config-assign-from-file-unknown-property/config.json b/libs/template/testdata/config-assign-from-file-unknown-property/config.json index 518eaa6a2..69ed020cf 100644 --- a/libs/template/testdata/config-assign-from-file-unknown-property/config.json +++ b/libs/template/testdata/config-assign-from-file-unknown-property/config.json @@ -1,3 +1,4 @@ { - "unknown_prop": 123 + "unknown_prop": 123, + "string_val": "i am a known property" } From 00d76d5afaa9fa7abfb57907e65e6be82debd3e9 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 27 Mar 2024 10:03:24 +0100 Subject: [PATCH 099/286] Move path field to bundle type (#1316) ## Changes The bundle path was previously stored on the `config.Root` type under the assumption that the first configuration file being loaded would set it. This is slightly counterintuitive and we know what the path is upon construction of the bundle. The new location for this property reflects this. ## Tests Unit tests pass. 
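As a quick illustration of the resulting access pattern (a hedged sketch: `config.Root` and the mutator machinery are elided, and `resolve` is a made-up helper, not part of this patch), the root path is now fixed when the bundle is constructed and consumers anchor file lookups at `b.RootPath`:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Bundle carries the directory that contains databricks.yml.
type Bundle struct {
	// RootPath is set once, when the bundle is instantiated, instead of as a
	// side effect of loading the first configuration file.
	RootPath string
}

func Load(path string) *Bundle {
	return &Bundle{RootPath: filepath.Clean(path)}
}

// resolve anchors a bundle-relative path at the bundle root, the way mutators
// now use b.RootPath where they previously used b.Config.Path.
func (b *Bundle) resolve(rel string) string {
	return filepath.Join(b.RootPath, rel)
}

func main() {
	b := Load("./my_project/")
	fmt.Println(b.resolve("dist/my_test_code-0.0.1-py3-none-any.whl"))
}
```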
--- bundle/artifacts/build.go | 2 +- bundle/artifacts/upload_test.go | 4 +-- bundle/artifacts/whl/autodetect.go | 6 ++-- bundle/artifacts/whl/from_libraries.go | 2 +- bundle/bundle.go | 15 +++++--- bundle/bundle_test.go | 4 +-- .../expand_pipeline_glob_paths_test.go | 2 +- bundle/config/mutator/load_git_details.go | 4 +-- bundle/config/mutator/process_include_test.go | 4 +-- .../config/mutator/process_root_includes.go | 8 ++--- .../mutator/process_root_includes_test.go | 34 ++++++++----------- bundle/config/mutator/rewrite_sync_paths.go | 4 +-- .../config/mutator/rewrite_sync_paths_test.go | 10 +++--- bundle/config/mutator/trampoline.go | 2 +- bundle/config/mutator/trampoline_test.go | 2 +- bundle/config/mutator/translate_paths.go | 2 +- bundle/config/mutator/translate_paths_test.go | 24 ++++++------- bundle/config/root.go | 11 +----- bundle/deploy/files/sync.go | 2 +- bundle/deploy/metadata/compute.go | 2 +- bundle/deploy/state_pull.go | 2 +- bundle/deploy/state_pull_test.go | 10 +++--- bundle/deploy/state_push_test.go | 2 +- bundle/deploy/state_update_test.go | 12 +++---- bundle/deploy/terraform/init_test.go | 14 ++++---- bundle/deploy/terraform/load_test.go | 2 +- bundle/deploy/terraform/state_pull_test.go | 2 +- bundle/deploy/terraform/state_push_test.go | 2 +- bundle/libraries/libraries.go | 2 +- bundle/libraries/libraries_test.go | 2 +- bundle/python/conditional_transform_test.go | 6 ++-- bundle/python/transform_test.go | 2 +- bundle/root_test.go | 4 +-- bundle/scripts/scripts.go | 2 +- bundle/scripts/scripts_test.go | 2 +- bundle/tests/python_wheel_test.go | 4 +-- cmd/bundle/generate/generate_test.go | 9 ++--- cmd/sync/sync_test.go | 3 +- internal/bundle/artifacts_test.go | 2 +- 39 files changed, 104 insertions(+), 124 deletions(-) diff --git a/bundle/artifacts/build.go b/bundle/artifacts/build.go index f3ee097c2..349b1ff89 100644 --- a/bundle/artifacts/build.go +++ b/bundle/artifacts/build.go @@ -46,7 +46,7 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // If artifact path is not provided, use bundle root dir if artifact.Path == "" { - artifact.Path = b.Config.Path + artifact.Path = b.RootPath } if !filepath.IsAbs(artifact.Path) { diff --git a/bundle/artifacts/upload_test.go b/bundle/artifacts/upload_test.go index ec7110095..687d73b4a 100644 --- a/bundle/artifacts/upload_test.go +++ b/bundle/artifacts/upload_test.go @@ -36,8 +36,8 @@ func TestExpandGlobFilesSource(t *testing.T) { t2.Close(t) b := &bundle.Bundle{ + RootPath: rootPath, Config: config.Root{ - Path: rootPath, Artifacts: map[string]*config.Artifact{ "test": { Type: "custom", @@ -72,8 +72,8 @@ func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) { require.NoError(t, err) b := &bundle.Bundle{ + RootPath: rootPath, Config: config.Root{ - Path: rootPath, Artifacts: map[string]*config.Artifact{ "test": { Type: "custom", diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index d11db8311..ee77fff01 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -35,21 +35,21 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic log.Infof(ctx, "Detecting Python wheel project...") // checking if there is setup.py in the bundle root - setupPy := filepath.Join(b.Config.Path, "setup.py") + setupPy := filepath.Join(b.RootPath, "setup.py") _, err := os.Stat(setupPy) if err != nil { log.Infof(ctx, "No Python wheel project found at bundle root folder") return nil } - log.Infof(ctx, fmt.Sprintf("Found 
Python wheel project at %s", b.Config.Path)) + log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.RootPath)) module := extractModuleName(setupPy) if b.Config.Artifacts == nil { b.Config.Artifacts = make(map[string]*config.Artifact) } - pkgPath, err := filepath.Abs(b.Config.Path) + pkgPath, err := filepath.Abs(b.RootPath) if err != nil { return diag.FromErr(err) } diff --git a/bundle/artifacts/whl/from_libraries.go b/bundle/artifacts/whl/from_libraries.go index a2045aaf8..84ef712ac 100644 --- a/bundle/artifacts/whl/from_libraries.go +++ b/bundle/artifacts/whl/from_libraries.go @@ -30,7 +30,7 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost tasks := libraries.FindAllWheelTasksWithLocalLibraries(b) for _, task := range tasks { for _, lib := range task.Libraries { - matches, err := filepath.Glob(filepath.Join(b.Config.Path, lib.Whl)) + matches, err := filepath.Glob(filepath.Join(b.RootPath, lib.Whl)) // File referenced from libraries section does not exists, skipping if err != nil { continue diff --git a/bundle/bundle.go b/bundle/bundle.go index a178ea090..0aa44df0b 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -30,6 +30,10 @@ import ( const internalFolder = ".internal" type Bundle struct { + // RootPath contains the directory path to the root of the bundle. + // It is set when we instantiate a new bundle instance. + RootPath string + Config config.Root // Metadata about the bundle deployment. This is the interface Databricks services @@ -63,7 +67,9 @@ type Bundle struct { } func Load(ctx context.Context, path string) (*Bundle, error) { - b := &Bundle{} + b := &Bundle{ + RootPath: filepath.Clean(path), + } stat, err := os.Stat(path) if err != nil { return nil, err @@ -75,7 +81,6 @@ func Load(ctx context.Context, path string) (*Bundle, error) { if hasRootEnv && hasIncludesEnv && stat.IsDir() { log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path) b.Config = config.Root{ - Path: path, Bundle: config.Bundle{ Name: filepath.Base(path), }, @@ -158,7 +163,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error) if !exists || cacheDirName == "" { cacheDirName = filepath.Join( // Anchor at bundle root directory. - b.Config.Path, + b.RootPath, // Static cache directory. 
".databricks", "bundle", @@ -210,7 +215,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { if err != nil { return nil, err } - internalDirRel, err := filepath.Rel(b.Config.Path, internalDir) + internalDirRel, err := filepath.Rel(b.RootPath, internalDir) if err != nil { return nil, err } @@ -218,7 +223,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { } func (b *Bundle) GitRepository() (*git.Repository, error) { - rootPath, err := folders.FindDirWithLeaf(b.Config.Path, ".git") + rootPath, err := folders.FindDirWithLeaf(b.RootPath, ".git") if err != nil { return nil, fmt.Errorf("unable to locate repository root: %w", err) } diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index 887a4ee83..be716a40a 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -77,7 +77,7 @@ func TestBundleMustLoadSuccess(t *testing.T) { t.Setenv(env.RootVariable, "./tests/basic") b, err := MustLoad(context.Background()) require.NoError(t, err) - assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path)) + assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath)) } func TestBundleMustLoadFailureWithEnv(t *testing.T) { @@ -96,7 +96,7 @@ func TestBundleTryLoadSuccess(t *testing.T) { t.Setenv(env.RootVariable, "./tests/basic") b, err := TryLoad(context.Background()) require.NoError(t, err) - assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path)) + assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath)) } func TestBundleTryLoadFailureWithEnv(t *testing.T) { diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index db80be028..d1671c256 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -41,8 +41,8 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "skip/test7.py")) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 6ff9aad62..7ce8476f1 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -22,7 +22,7 @@ func (m *loadGitDetails) Name() string { func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Load relevant git repository - repo, err := git.NewRepository(b.Config.Path) + repo, err := git.NewRepository(b.RootPath) if err != nil { return diag.FromErr(err) } @@ -56,7 +56,7 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn } // Compute relative path of the bundle root from the Git repo root. 
- absBundlePath, err := filepath.Abs(b.Config.Path) + absBundlePath, err := filepath.Abs(b.RootPath) if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/process_include_test.go b/bundle/config/mutator/process_include_test.go index 0e5351b63..b4fa3ccda 100644 --- a/bundle/config/mutator/process_include_test.go +++ b/bundle/config/mutator/process_include_test.go @@ -16,8 +16,8 @@ import ( func TestProcessInclude(t *testing.T) { b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Workspace: config.Workspace{ Host: "foo", }, @@ -25,7 +25,7 @@ func TestProcessInclude(t *testing.T) { } relPath := "./file.yml" - fullPath := filepath.Join(b.Config.Path, relPath) + fullPath := filepath.Join(b.RootPath, relPath) f, err := os.Create(fullPath) require.NoError(t, err) fmt.Fprint(f, "workspace:\n host: bar\n") diff --git a/bundle/config/mutator/process_root_includes.go b/bundle/config/mutator/process_root_includes.go index dbf99f2dc..4e4aeef43 100644 --- a/bundle/config/mutator/process_root_includes.go +++ b/bundle/config/mutator/process_root_includes.go @@ -51,7 +51,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag. // Converts extra include paths from environment variable to relative paths for _, extraIncludePath := range getExtraIncludePaths(ctx) { if filepath.IsAbs(extraIncludePath) { - rel, err := filepath.Rel(b.Config.Path, extraIncludePath) + rel, err := filepath.Rel(b.RootPath, extraIncludePath) if err != nil { return diag.Errorf("unable to include file '%s': %v", extraIncludePath, err) } @@ -70,7 +70,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag. } // Anchor includes to the bundle root path. - matches, err := filepath.Glob(filepath.Join(b.Config.Path, entry)) + matches, err := filepath.Glob(filepath.Join(b.RootPath, entry)) if err != nil { return diag.FromErr(err) } @@ -84,7 +84,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag. // Filter matches to ones we haven't seen yet. var includes []string for _, match := range matches { - rel, err := filepath.Rel(b.Config.Path, match) + rel, err := filepath.Rel(b.RootPath, match) if err != nil { return diag.FromErr(err) } @@ -99,7 +99,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag. slices.Sort(includes) files = append(files, includes...) 
for _, include := range includes { - out = append(out, ProcessInclude(filepath.Join(b.Config.Path, include), include)) + out = append(out, ProcessInclude(filepath.Join(b.RootPath, include), include)) } } diff --git a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/mutator/process_root_includes_test.go index 7b2194553..d3aaa974d 100644 --- a/bundle/config/mutator/process_root_includes_test.go +++ b/bundle/config/mutator/process_root_includes_test.go @@ -19,9 +19,7 @@ import ( func TestProcessRootIncludesEmpty(t *testing.T) { b := &bundle.Bundle{ - Config: config.Root{ - Path: ".", - }, + RootPath: ".", } diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, diags.Error()) @@ -36,8 +34,8 @@ func TestProcessRootIncludesAbs(t *testing.T) { } b := &bundle.Bundle{ + RootPath: ".", Config: config.Root{ - Path: ".", Include: []string{ "/tmp/*.yml", }, @@ -50,17 +48,17 @@ func TestProcessRootIncludesAbs(t *testing.T) { func TestProcessRootIncludesSingleGlob(t *testing.T) { b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Include: []string{ "*.yml", }, }, } - testutil.Touch(t, b.Config.Path, "databricks.yml") - testutil.Touch(t, b.Config.Path, "a.yml") - testutil.Touch(t, b.Config.Path, "b.yml") + testutil.Touch(t, b.RootPath, "databricks.yml") + testutil.Touch(t, b.RootPath, "a.yml") + testutil.Touch(t, b.RootPath, "b.yml") diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, diags.Error()) @@ -69,8 +67,8 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) { func TestProcessRootIncludesMultiGlob(t *testing.T) { b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Include: []string{ "a*.yml", "b*.yml", @@ -78,8 +76,8 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) { }, } - testutil.Touch(t, b.Config.Path, "a1.yml") - testutil.Touch(t, b.Config.Path, "b1.yml") + testutil.Touch(t, b.RootPath, "a1.yml") + testutil.Touch(t, b.RootPath, "b1.yml") diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, diags.Error()) @@ -88,8 +86,8 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) { func TestProcessRootIncludesRemoveDups(t *testing.T) { b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Include: []string{ "*.yml", "*.yml", @@ -97,7 +95,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) { }, } - testutil.Touch(t, b.Config.Path, "a.yml") + testutil.Touch(t, b.RootPath, "a.yml") diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) require.NoError(t, diags.Error()) @@ -106,8 +104,8 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) { func TestProcessRootIncludesNotExists(t *testing.T) { b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Include: []string{ "notexist.yml", }, @@ -125,9 +123,7 @@ func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) { t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName)) b := &bundle.Bundle{ - Config: config.Root{ - Path: rootPath, - }, + RootPath: rootPath, } diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) @@ -148,9 +144,7 @@ func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) { )) b := &bundle.Bundle{ - Config: config.Root{ - Path: rootPath, - }, + RootPath: rootPath, } diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) diff --git 
a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go index 0785c6430..710190230 100644 --- a/bundle/config/mutator/rewrite_sync_paths.go +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -45,11 +45,11 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) { - v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.Config.Path))) + v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath))) if err != nil { return dyn.NilValue, err } - v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.Config.Path))) + v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath))) if err != nil { return dyn.NilValue, err } diff --git a/bundle/config/mutator/rewrite_sync_paths_test.go b/bundle/config/mutator/rewrite_sync_paths_test.go index 667f811ac..56ada19e6 100644 --- a/bundle/config/mutator/rewrite_sync_paths_test.go +++ b/bundle/config/mutator/rewrite_sync_paths_test.go @@ -14,8 +14,8 @@ import ( func TestRewriteSyncPathsRelative(t *testing.T) { b := &bundle.Bundle{ + RootPath: ".", Config: config.Root{ - Path: ".", Sync: config.Sync{ Include: []string{ "foo", @@ -45,8 +45,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) { func TestRewriteSyncPathsAbsolute(t *testing.T) { b := &bundle.Bundle{ + RootPath: "/tmp/dir", Config: config.Root{ - Path: "/tmp/dir", Sync: config.Sync{ Include: []string{ "foo", @@ -77,9 +77,7 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) { func TestRewriteSyncPathsErrorPaths(t *testing.T) { t.Run("no sync block", func(t *testing.T) { b := &bundle.Bundle{ - Config: config.Root{ - Path: ".", - }, + RootPath: ".", } diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) @@ -88,8 +86,8 @@ func TestRewriteSyncPathsErrorPaths(t *testing.T) { t.Run("empty include/exclude blocks", func(t *testing.T) { b := &bundle.Bundle{ + RootPath: ".", Config: config.Root{ - Path: ".", Sync: config.Sync{ Include: []string{}, Exclude: []string{}, diff --git a/bundle/config/mutator/trampoline.go b/bundle/config/mutator/trampoline.go index 72c053b59..dde9a299e 100644 --- a/bundle/config/mutator/trampoline.go +++ b/bundle/config/mutator/trampoline.go @@ -82,7 +82,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund return err } - internalDirRel, err := filepath.Rel(b.Config.Path, internalDir) + internalDirRel, err := filepath.Rel(b.RootPath, internalDir) if err != nil { return err } diff --git a/bundle/config/mutator/trampoline_test.go b/bundle/config/mutator/trampoline_test.go index 8a375aa9b..e39076647 100644 --- a/bundle/config/mutator/trampoline_test.go +++ b/bundle/config/mutator/trampoline_test.go @@ -57,8 +57,8 @@ func TestGenerateTrampoline(t *testing.T) { } b := &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ - Path: tmpDir, Bundle: config.Bundle{ Target: "development", }, diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index af6896ee0..8fab3abb3 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -85,7 +85,7 @@ func (m *translatePaths) rewritePath( } // Remote path must be relative to the bundle root. 
- localRelPath, err := filepath.Rel(b.Config.Path, localPath) + localRelPath, err := filepath.Rel(b.RootPath, localPath) if err != nil { return err } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index bd2ec809b..9650ae8ba 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -36,8 +36,8 @@ func touchEmptyFile(t *testing.T, path string) { func TestTranslatePathsSkippedWithGitSource(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, @@ -106,8 +106,8 @@ func TestTranslatePaths(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar")) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, @@ -273,8 +273,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml")) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, @@ -367,8 +367,8 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, @@ -400,8 +400,8 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { @@ -430,8 +430,8 @@ func TestJobFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { @@ -460,8 +460,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { @@ -490,8 +490,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { @@ -521,8 +521,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, @@ -555,8 +555,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, @@ -589,8 +589,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, @@ -623,8 +623,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, diff --git a/bundle/config/root.go b/bundle/config/root.go index 8e1ff6507..a3dd0d28b 100644 --- 
a/bundle/config/root.go +++ b/bundle/config/root.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "os" - "path/filepath" "strings" "github.com/databricks/cli/bundle/config/resources" @@ -24,10 +23,6 @@ type Root struct { diags diag.Diagnostics depth int - // Path contains the directory path to the root of the bundle. - // It is set when loading `databricks.yml`. - Path string `json:"-" bundle:"readonly"` - // Contains user defined variables Variables map[string]*variable.Variable `json:"variables,omitempty"` @@ -80,9 +75,7 @@ func Load(path string) (*Root, error) { return nil, err } - r := Root{ - Path: filepath.Dir(path), - } + r := Root{} // Load configuration tree from YAML. v, err := yamlloader.LoadYAML(path, bytes.NewBuffer(raw)) @@ -135,12 +128,10 @@ func (r *Root) updateWithDynamicValue(nv dyn.Value) error { // the configuration equals nil (happens in tests). diags := r.diags depth := r.depth - path := r.Path defer func() { r.diags = diags r.depth = depth - r.Path = path }() // Convert normalized configuration tree to typed configuration. diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index 8de80c22f..e8c54c633 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -28,7 +28,7 @@ func GetSyncOptions(ctx context.Context, b *bundle.Bundle) (*sync.SyncOptions, e } opts := &sync.SyncOptions{ - LocalPath: b.Config.Path, + LocalPath: b.RootPath, RemotePath: b.Config.Workspace.FilePath, Include: includes, Exclude: b.Config.Sync.Exclude, diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go index 5a46cd67f..034765484 100644 --- a/bundle/deploy/metadata/compute.go +++ b/bundle/deploy/metadata/compute.go @@ -39,7 +39,7 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { for name, job := range b.Config.Resources.Jobs { // Compute config file path the job is defined in, relative to the bundle // root - relativePath, err := filepath.Rel(b.Config.Path, job.ConfigFilePath) + relativePath, err := filepath.Rel(b.RootPath, job.ConfigFilePath) if err != nil { return diag.Errorf("failed to compute relative path for job %s: %v", name, err) } diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 61f5426a0..bae457ea0 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -85,7 +85,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic } log.Infof(ctx, "Creating new snapshot") - snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.Config.Path), opts) + snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.RootPath), opts) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index 9716a1e04..80acb254f 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -59,8 +59,8 @@ func testStatePull(t *testing.T, opts statePullOpts) { }} b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "default", }, @@ -77,11 +77,11 @@ func testStatePull(t *testing.T, opts statePullOpts) { ctx := context.Background() for _, file := range opts.localFiles { - testutil.Touch(t, filepath.Join(b.Config.Path, "bar"), file) + testutil.Touch(t, filepath.Join(b.RootPath, "bar"), file) } for _, file := range opts.localNotebooks { - testutil.TouchNotebook(t, filepath.Join(b.Config.Path, "bar"), file) + testutil.TouchNotebook(t, filepath.Join(b.RootPath, "bar"), file) } if 
opts.withExistingSnapshot { @@ -251,8 +251,8 @@ func TestStatePullNoState(t *testing.T) { }} b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "default", }, @@ -439,8 +439,8 @@ func TestStatePullNewerDeploymentStateVersion(t *testing.T) { }} b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "default", }, diff --git a/bundle/deploy/state_push_test.go b/bundle/deploy/state_push_test.go index c6d9f88f5..39e4d13a5 100644 --- a/bundle/deploy/state_push_test.go +++ b/bundle/deploy/state_push_test.go @@ -45,8 +45,8 @@ func TestStatePush(t *testing.T) { }} b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "default", }, diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index 73b7fe4b3..dd8a1336e 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -22,8 +22,8 @@ func TestStateUpdate(t *testing.T) { s := &stateUpdate{} b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "default", }, @@ -39,8 +39,8 @@ func TestStateUpdate(t *testing.T) { }, } - testutil.Touch(t, b.Config.Path, "test1.py") - testutil.Touch(t, b.Config.Path, "test2.py") + testutil.Touch(t, b.RootPath, "test1.py") + testutil.Touch(t, b.RootPath, "test2.py") m := mocks.NewMockWorkspaceClient(t) m.WorkspaceClient.Config = &databrickscfg.Config{ @@ -82,8 +82,8 @@ func TestStateUpdateWithExistingState(t *testing.T) { s := &stateUpdate{} b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "default", }, @@ -99,8 +99,8 @@ func TestStateUpdateWithExistingState(t *testing.T) { }, } - testutil.Touch(t, b.Config.Path, "test1.py") - testutil.Touch(t, b.Config.Path, "test2.py") + testutil.Touch(t, b.RootPath, "test1.py") + testutil.Touch(t, b.RootPath, "test2.py") m := mocks.NewMockWorkspaceClient(t) m.WorkspaceClient.Config = &databrickscfg.Config{ diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index bbef7f0f7..29bd80a3e 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -28,8 +28,8 @@ func TestInitEnvironmentVariables(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", Terraform: &config.Terraform{ @@ -55,8 +55,8 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -83,8 +83,8 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -109,8 +109,8 @@ func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -139,8 +139,8 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -169,8 +169,8 @@ func 
TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -197,8 +197,8 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) { func TestSetProxyEnvVars(t *testing.T) { b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, diff --git a/bundle/deploy/terraform/load_test.go b/bundle/deploy/terraform/load_test.go index a912c5213..c62217187 100644 --- a/bundle/deploy/terraform/load_test.go +++ b/bundle/deploy/terraform/load_test.go @@ -17,8 +17,8 @@ func TestLoadWithNoState(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", Terraform: &config.Terraform{ diff --git a/bundle/deploy/terraform/state_pull_test.go b/bundle/deploy/terraform/state_pull_test.go index 805b5af0f..26297bfcb 100644 --- a/bundle/deploy/terraform/state_pull_test.go +++ b/bundle/deploy/terraform/state_pull_test.go @@ -32,11 +32,11 @@ func mockStateFilerForPull(t *testing.T, contents map[string]int, merr error) fi func statePullTestBundle(t *testing.T) *bundle.Bundle { return &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ Bundle: config.Bundle{ Target: "default", }, - Path: t.TempDir(), }, } } diff --git a/bundle/deploy/terraform/state_push_test.go b/bundle/deploy/terraform/state_push_test.go index 41d384900..e054773f3 100644 --- a/bundle/deploy/terraform/state_push_test.go +++ b/bundle/deploy/terraform/state_push_test.go @@ -29,11 +29,11 @@ func mockStateFilerForPush(t *testing.T, fn func(body io.Reader)) filer.Filer { func statePushTestBundle(t *testing.T) *bundle.Bundle { return &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ Bundle: config.Bundle{ Target: "default", }, - Path: t.TempDir(), }, } } diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index e0cb3fa38..8dd63a75a 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -65,7 +65,7 @@ func findLibraryMatches(lib *compute.Library, b *bundle.Bundle) ([]string, error return nil, nil } - fullPath := filepath.Join(b.Config.Path, path) + fullPath := filepath.Join(b.RootPath, path) return filepath.Glob(fullPath) } diff --git a/bundle/libraries/libraries_test.go b/bundle/libraries/libraries_test.go index 0bec2c6d0..3da10d47b 100644 --- a/bundle/libraries/libraries_test.go +++ b/bundle/libraries/libraries_test.go @@ -15,8 +15,8 @@ import ( func TestMapFilesToTaskLibrariesNoGlob(t *testing.T) { b := &bundle.Bundle{ + RootPath: "testdata", Config: config.Root{ - Path: "testdata", Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": { diff --git a/bundle/python/conditional_transform_test.go b/bundle/python/conditional_transform_test.go index b4d7f9edb..677970d70 100644 --- a/bundle/python/conditional_transform_test.go +++ b/bundle/python/conditional_transform_test.go @@ -18,8 +18,8 @@ func TestNoTransformByDefault(t *testing.T) { tmpDir := t.TempDir() b := &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ - Path: tmpDir, Bundle: config.Bundle{ Target: "development", }, @@ -63,8 +63,8 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { tmpDir := t.TempDir() b := &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ - Path: tmpDir, Bundle: config.Bundle{ Target: "development", }, @@ -106,7 +106,7 @@ 
func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { dir, err := b.InternalDir(context.Background()) require.NoError(t, err) - internalDirRel, err := filepath.Rel(b.Config.Path, dir) + internalDirRel, err := filepath.Rel(b.RootPath, dir) require.NoError(t, err) require.Equal(t, path.Join(filepath.ToSlash(internalDirRel), "notebook_job1_key1"), task.NotebookTask.NotebookPath) diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index 729efe1a9..c15feb424 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -116,8 +116,8 @@ func TestTransformFiltersWheelTasksOnly(t *testing.T) { func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { tmpDir := t.TempDir() b := &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ - Path: tmpDir, Bundle: config.Bundle{ Target: "development", }, diff --git a/bundle/root_test.go b/bundle/root_test.go index e6c53e824..a83f36ace 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -106,7 +106,7 @@ func TestLoadYamlWhenIncludesEnvPresent(t *testing.T) { cwd, err := os.Getwd() assert.NoError(t, err) - assert.Equal(t, cwd, bundle.Config.Path) + assert.Equal(t, cwd, bundle.RootPath) } func TestLoadDefautlBundleWhenNoYamlAndRootAndIncludesEnvPresent(t *testing.T) { @@ -118,7 +118,7 @@ func TestLoadDefautlBundleWhenNoYamlAndRootAndIncludesEnvPresent(t *testing.T) { bundle, err := MustLoad(ctx) assert.NoError(t, err) - assert.Equal(t, dir, bundle.Config.Path) + assert.Equal(t, dir, bundle.RootPath) } func TestErrorIfNoYamlNoRootEnvAndIncludesEnvPresent(t *testing.T) { diff --git a/bundle/scripts/scripts.go b/bundle/scripts/scripts.go index f8ed7d6a3..38d204f99 100644 --- a/bundle/scripts/scripts.go +++ b/bundle/scripts/scripts.go @@ -30,7 +30,7 @@ func (m *script) Name() string { } func (m *script) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - executor, err := exec.NewCommandExecutor(b.Config.Path) + executor, err := exec.NewCommandExecutor(b.RootPath) if err != nil { return diag.FromErr(err) } diff --git a/bundle/scripts/scripts_test.go b/bundle/scripts/scripts_test.go index fa5c23970..1bc216b61 100644 --- a/bundle/scripts/scripts_test.go +++ b/bundle/scripts/scripts_test.go @@ -23,7 +23,7 @@ func TestExecutesHook(t *testing.T) { }, } - executor, err := exec.NewCommandExecutor(b.Config.Path) + executor, err := exec.NewCommandExecutor(b.RootPath) require.NoError(t, err) _, out, err := executeHook(context.Background(), executor, b, config.ScriptPreBuild) require.NoError(t, err) diff --git a/bundle/tests/python_wheel_test.go b/bundle/tests/python_wheel_test.go index c44e80a57..412b507fe 100644 --- a/bundle/tests/python_wheel_test.go +++ b/bundle/tests/python_wheel_test.go @@ -79,9 +79,7 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { artifact := b.Config.Artifacts["my_test_code-0.0.1-py3-none-any.whl"] require.NotNil(t, artifact) require.Empty(t, artifact.BuildCommand) - require.Contains(t, artifact.Files[0].Source, filepath.Join( - b.Config.Path, - "package", + require.Contains(t, artifact.Files[0].Source, filepath.Join(b.RootPath, "package", "my_test_code-0.0.1-py3-none-any.whl", )) } diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index b71f1edfd..69ef639ae 100644 --- a/cmd/bundle/generate/generate_test.go +++ b/cmd/bundle/generate/generate_test.go @@ -10,7 +10,6 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" 
"github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -25,9 +24,7 @@ func TestGeneratePipelineCommand(t *testing.T) { root := t.TempDir() b := &bundle.Bundle{ - Config: config.Root{ - Path: root, - }, + RootPath: root, } m := mocks.NewMockWorkspaceClient(t) @@ -125,9 +122,7 @@ func TestGenerateJobCommand(t *testing.T) { root := t.TempDir() b := &bundle.Bundle{ - Config: config.Root{ - Path: root, - }, + RootPath: root, } m := mocks.NewMockWorkspaceClient(t) diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index 827c4d509..026d840f7 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -16,9 +16,8 @@ import ( func TestSyncOptionsFromBundle(t *testing.T) { tempDir := t.TempDir() b := &bundle.Bundle{ + RootPath: tempDir, Config: config.Root{ - Path: tempDir, - Bundle: config.Bundle{ Target: "default", }, diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 2ced12fdd..866a1f6e9 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -36,8 +36,8 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { wsDir := internal.TemporaryWorkspaceDir(t, w) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Bundle: config.Bundle{ Target: "whatever", }, From f195b844758a82050dfbd8873d58984aa62b4052 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 27 Mar 2024 11:13:54 +0100 Subject: [PATCH 100/286] Remove support for DATABRICKS_BUNDLE_INCLUDES (#1317) ## Changes PR #604 added functionality to load a bundle without a `databricks.yml` if both the `DATABRICKS_BUNDLE_ROOT` and `DATABRICKS_BUNDLE_INCLUDES` environment variables were set. We never ended up using this in downstream tools so this can be removed. ## Tests Unit tests pass. 
--- bundle/bundle.go | 15 ------ .../config/mutator/process_root_includes.go | 23 --------- .../mutator/process_root_includes_test.go | 40 ---------------- bundle/env/includes.go | 14 ------ bundle/env/includes_test.go | 28 ----------- bundle/root_test.go | 47 ------------------- 6 files changed, 167 deletions(-) delete mode 100644 bundle/env/includes.go delete mode 100644 bundle/env/includes_test.go diff --git a/bundle/bundle.go b/bundle/bundle.go index 0aa44df0b..2e193bbf3 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -70,23 +70,8 @@ func Load(ctx context.Context, path string) (*Bundle, error) { b := &Bundle{ RootPath: filepath.Clean(path), } - stat, err := os.Stat(path) - if err != nil { - return nil, err - } configFile, err := config.FileNames.FindInPath(path) if err != nil { - _, hasRootEnv := env.Root(ctx) - _, hasIncludesEnv := env.Includes(ctx) - if hasRootEnv && hasIncludesEnv && stat.IsDir() { - log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path) - b.Config = config.Root{ - Bundle: config.Bundle{ - Name: filepath.Base(path), - }, - } - return b, nil - } return nil, err } log.Debugf(ctx, "Loading bundle configuration from: %s", configFile) diff --git a/bundle/config/mutator/process_root_includes.go b/bundle/config/mutator/process_root_includes.go index 4e4aeef43..c5e0a22c5 100644 --- a/bundle/config/mutator/process_root_includes.go +++ b/bundle/config/mutator/process_root_includes.go @@ -2,26 +2,15 @@ package mutator import ( "context" - "os" "path/filepath" "slices" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/libs/diag" ) -// Get extra include paths from environment variable -func getExtraIncludePaths(ctx context.Context) []string { - value, exists := env.Includes(ctx) - if !exists { - return nil - } - return strings.Split(value, string(os.PathListSeparator)) -} - type processRootIncludes struct{} // ProcessRootIncludes expands the patterns in the configuration's include list @@ -48,18 +37,6 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag. // This is stored in the bundle configuration for observability. var files []string - // Converts extra include paths from environment variable to relative paths - for _, extraIncludePath := range getExtraIncludePaths(ctx) { - if filepath.IsAbs(extraIncludePath) { - rel, err := filepath.Rel(b.RootPath, extraIncludePath) - if err != nil { - return diag.Errorf("unable to include file '%s': %v", extraIncludePath, err) - } - extraIncludePath = rel - } - b.Config.Include = append(b.Config.Include, extraIncludePath) - } - // For each glob, find all files to load. // Ordering of the list of globs is maintained in the output. // For matches that appear in multiple globs, only the first is kept. 
diff --git a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/mutator/process_root_includes_test.go index d3aaa974d..675dd9acf 100644 --- a/bundle/config/mutator/process_root_includes_test.go +++ b/bundle/config/mutator/process_root_includes_test.go @@ -2,16 +2,12 @@ package mutator_test import ( "context" - "os" - "path" "runtime" - "strings" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -115,39 +111,3 @@ func TestProcessRootIncludesNotExists(t *testing.T) { require.True(t, diags.HasError()) assert.ErrorContains(t, diags.Error(), "notexist.yml defined in 'include' section does not match any files") } - -func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) { - rootPath := t.TempDir() - testYamlName := "extra_include_path.yml" - testutil.Touch(t, rootPath, testYamlName) - t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName)) - - b := &bundle.Bundle{ - RootPath: rootPath, - } - - diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.NoError(t, diags.Error()) - assert.Contains(t, b.Config.Include, testYamlName) -} - -func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) { - rootPath := t.TempDir() - testYamlName := "extra_include_path.yml" - testutil.Touch(t, rootPath, testYamlName) - t.Setenv(env.IncludesVariable, strings.Join( - []string{ - path.Join(rootPath, testYamlName), - path.Join(rootPath, testYamlName), - }, - string(os.PathListSeparator), - )) - - b := &bundle.Bundle{ - RootPath: rootPath, - } - - diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.NoError(t, diags.Error()) - assert.Equal(t, []string{testYamlName}, b.Config.Include) -} diff --git a/bundle/env/includes.go b/bundle/env/includes.go deleted file mode 100644 index 4ade01877..000000000 --- a/bundle/env/includes.go +++ /dev/null @@ -1,14 +0,0 @@ -package env - -import "context" - -// IncludesVariable names the environment variable that holds additional configuration paths to include -// during bundle configuration loading. Also see `bundle/config/mutator/process_root_includes.go`. -const IncludesVariable = "DATABRICKS_BUNDLE_INCLUDES" - -// Includes returns the bundle Includes environment variable. 
-func Includes(ctx context.Context) (string, bool) { - return get(ctx, []string{ - IncludesVariable, - }) -} diff --git a/bundle/env/includes_test.go b/bundle/env/includes_test.go deleted file mode 100644 index d9366a59f..000000000 --- a/bundle/env/includes_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package env - -import ( - "context" - "testing" - - "github.com/databricks/cli/internal/testutil" - "github.com/stretchr/testify/assert" -) - -func TestIncludes(t *testing.T) { - ctx := context.Background() - - testutil.CleanupEnvironment(t) - - t.Run("set", func(t *testing.T) { - t.Setenv("DATABRICKS_BUNDLE_INCLUDES", "foo") - includes, ok := Includes(ctx) - assert.True(t, ok) - assert.Equal(t, "foo", includes) - }) - - t.Run("not set", func(t *testing.T) { - includes, ok := Includes(ctx) - assert.False(t, ok) - assert.Equal(t, "", includes) - }) -} diff --git a/bundle/root_test.go b/bundle/root_test.go index a83f36ace..99bf58a00 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/internal/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -94,49 +93,3 @@ func TestRootLookupError(t *testing.T) { _, err := mustGetRoot(ctx) require.ErrorContains(t, err, "unable to locate bundle root") } - -func TestLoadYamlWhenIncludesEnvPresent(t *testing.T) { - ctx := context.Background() - testutil.Chdir(t, filepath.Join(".", "tests", "basic")) - t.Setenv(env.IncludesVariable, "test") - - bundle, err := MustLoad(ctx) - assert.NoError(t, err) - assert.Equal(t, "basic", bundle.Config.Bundle.Name) - - cwd, err := os.Getwd() - assert.NoError(t, err) - assert.Equal(t, cwd, bundle.RootPath) -} - -func TestLoadDefautlBundleWhenNoYamlAndRootAndIncludesEnvPresent(t *testing.T) { - ctx := context.Background() - dir := t.TempDir() - testutil.Chdir(t, dir) - t.Setenv(env.RootVariable, dir) - t.Setenv(env.IncludesVariable, "test") - - bundle, err := MustLoad(ctx) - assert.NoError(t, err) - assert.Equal(t, dir, bundle.RootPath) -} - -func TestErrorIfNoYamlNoRootEnvAndIncludesEnvPresent(t *testing.T) { - ctx := context.Background() - dir := t.TempDir() - testutil.Chdir(t, dir) - t.Setenv(env.IncludesVariable, "test") - - _, err := MustLoad(ctx) - assert.Error(t, err) -} - -func TestErrorIfNoYamlNoIncludesEnvAndRootEnvPresent(t *testing.T) { - ctx := context.Background() - dir := t.TempDir() - testutil.Chdir(t, dir) - t.Setenv(env.RootVariable, dir) - - _, err := MustLoad(ctx) - assert.Error(t, err) -} From ca534d596bb410dd1fbae3e90d951ca434fd2dd3 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 27 Mar 2024 11:49:05 +0100 Subject: [PATCH 101/286] Load bundle configuration from mutator (#1318) ## Changes Prior to this change, the bundle configuration entry point was loaded from the function `bundle.Load`. Other configuration files were only loaded once the caller applied the first set of mutators. This separation was unnecessary and not ideal in light of gathering diagnostics while loading _any_ configuration file, not just the ones from the includes. This change: * Updates `bundle.Load` to only verify that the specified path is a valid bundle root. * Moves mutators that perform loading to `bundle/config/loader`. * Adds a "load" phase that takes the place of applying `DefaultMutators`. Follow ups: * Rename `bundle.Load` -> `bundle.Find` (because it no longer performs loading) This change depends on #1316 and #1317. ## Tests Tests pass. 
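A rough sketch of the resulting call sequence; the `loadBundle` helper and its package are illustrative only, and real commands layer target selection and further mutators on top of the load phase:

```go
package example

import (
	"context"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/phases"
)

// loadBundle illustrates the split introduced by this change; it is not the
// verbatim wiring used by the CLI commands.
func loadBundle(ctx context.Context, path string) (*bundle.Bundle, error) {
	// Load no longer reads configuration; it only verifies that `path` contains
	// one of the recognized bundle configuration file names.
	b, err := bundle.Load(ctx, path)
	if err != nil {
		return nil, err
	}

	// Reading databricks.yml (EntryPoint) and its includes (ProcessRootIncludes)
	// now happens in the new "load" phase.
	if diags := bundle.Apply(ctx, b, phases.Load()); diags.HasError() {
		return nil, diags.Error()
	}
	return b, nil
}
```

Callers that previously applied `mutator.DefaultMutators()` directly can use `phases.LoadDefaultTarget()` or `phases.LoadNamedTarget(target)` instead.
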
--- bundle/bundle.go | 7 +--- bundle/bundle_test.go | 4 +-- bundle/config/loader/entry_point.go | 34 +++++++++++++++++++ bundle/config/loader/entry_point_test.go | 26 ++++++++++++++ .../{mutator => loader}/process_include.go | 2 +- .../process_include_test.go | 21 +++++------- .../process_root_includes.go | 2 +- .../process_root_includes_test.go | 16 ++++----- bundle/config/loader/testdata/databricks.yml | 2 ++ bundle/config/loader/testdata/host.yml | 2 ++ bundle/config/mutator/mutator.go | 6 +++- bundle/phases/load.go | 29 ++++++++++++++++ bundle/tests/conflicting_resource_ids_test.go | 12 ++++--- bundle/tests/include_test.go | 4 +-- bundle/tests/loader.go | 8 ++--- bundle/tests/python_wheel_test.go | 12 +++---- cmd/root/bundle_test.go | 4 +++ libs/template/renderer_test.go | 15 ++++---- 18 files changed, 149 insertions(+), 57 deletions(-) create mode 100644 bundle/config/loader/entry_point.go create mode 100644 bundle/config/loader/entry_point_test.go rename bundle/config/{mutator => loader}/process_include.go (98%) rename bundle/config/{mutator => loader}/process_include_test.go (54%) rename bundle/config/{mutator => loader}/process_root_includes.go (99%) rename bundle/config/{mutator => loader}/process_root_includes_test.go (80%) create mode 100644 bundle/config/loader/testdata/databricks.yml create mode 100644 bundle/config/loader/testdata/host.yml create mode 100644 bundle/phases/load.go diff --git a/bundle/bundle.go b/bundle/bundle.go index 2e193bbf3..977ca2247 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -74,12 +74,7 @@ func Load(ctx context.Context, path string) (*Bundle, error) { if err != nil { return nil, err } - log.Debugf(ctx, "Loading bundle configuration from: %s", configFile) - root, err := config.Load(configFile) - if err != nil { - return nil, err - } - b.Config = *root + log.Debugf(ctx, "Found bundle root at %s (file %s)", b.RootPath, configFile) return b, nil } diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index be716a40a..908b446e2 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -20,8 +20,8 @@ func TestLoadNotExists(t *testing.T) { func TestLoadExists(t *testing.T) { b, err := Load(context.Background(), "./tests/basic") - require.Nil(t, err) - assert.Equal(t, "basic", b.Config.Bundle.Name) + assert.NoError(t, err) + assert.NotNil(t, b) } func TestBundleCacheDir(t *testing.T) { diff --git a/bundle/config/loader/entry_point.go b/bundle/config/loader/entry_point.go new file mode 100644 index 000000000..24ba2f068 --- /dev/null +++ b/bundle/config/loader/entry_point.go @@ -0,0 +1,34 @@ +package loader + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" +) + +type entryPoint struct{} + +// EntryPoint loads the entry point configuration. +func EntryPoint() bundle.Mutator { + return &entryPoint{} +} + +func (m *entryPoint) Name() string { + return "EntryPoint" +} + +func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + path, err := config.FileNames.FindInPath(b.RootPath) + if err != nil { + return diag.FromErr(err) + } + this, err := config.Load(path) + if err != nil { + return diag.FromErr(err) + } + // TODO: Return actual warnings. 
+ err = b.Config.Merge(this) + return diag.FromErr(err) +} diff --git a/bundle/config/loader/entry_point_test.go b/bundle/config/loader/entry_point_test.go new file mode 100644 index 000000000..80271f0b7 --- /dev/null +++ b/bundle/config/loader/entry_point_test.go @@ -0,0 +1,26 @@ +package loader_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/loader" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEntryPointNoRootPath(t *testing.T) { + b := &bundle.Bundle{} + diags := bundle.Apply(context.Background(), b, loader.EntryPoint()) + require.Error(t, diags.Error()) +} + +func TestEntryPoint(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "testdata", + } + diags := bundle.Apply(context.Background(), b, loader.EntryPoint()) + require.NoError(t, diags.Error()) + assert.Equal(t, "loader_test", b.Config.Bundle.Name) +} diff --git a/bundle/config/mutator/process_include.go b/bundle/config/loader/process_include.go similarity index 98% rename from bundle/config/mutator/process_include.go rename to bundle/config/loader/process_include.go index 23acdf12a..328f4eacf 100644 --- a/bundle/config/mutator/process_include.go +++ b/bundle/config/loader/process_include.go @@ -1,4 +1,4 @@ -package mutator +package loader import ( "context" diff --git a/bundle/config/mutator/process_include_test.go b/bundle/config/loader/process_include_test.go similarity index 54% rename from bundle/config/mutator/process_include_test.go rename to bundle/config/loader/process_include_test.go index b4fa3ccda..da4da9ff6 100644 --- a/bundle/config/mutator/process_include_test.go +++ b/bundle/config/loader/process_include_test.go @@ -1,22 +1,20 @@ -package mutator_test +package loader_test import ( "context" - "fmt" - "os" "path/filepath" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/loader" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestProcessInclude(t *testing.T) { b := &bundle.Bundle{ - RootPath: t.TempDir(), + RootPath: "testdata", Config: config.Root{ Workspace: config.Workspace{ Host: "foo", @@ -24,15 +22,14 @@ func TestProcessInclude(t *testing.T) { }, } - relPath := "./file.yml" - fullPath := filepath.Join(b.RootPath, relPath) - f, err := os.Create(fullPath) - require.NoError(t, err) - fmt.Fprint(f, "workspace:\n host: bar\n") - f.Close() + m := loader.ProcessInclude(filepath.Join(b.RootPath, "host.yml"), "host.yml") + assert.Equal(t, "ProcessInclude(host.yml)", m.Name()) + // Assert the host value prior to applying the mutator assert.Equal(t, "foo", b.Config.Workspace.Host) - diags := bundle.Apply(context.Background(), b, mutator.ProcessInclude(fullPath, relPath)) + + // Apply the mutator and assert that the host value has been updated + diags := bundle.Apply(context.Background(), b, m) require.NoError(t, diags.Error()) assert.Equal(t, "bar", b.Config.Workspace.Host) } diff --git a/bundle/config/mutator/process_root_includes.go b/bundle/config/loader/process_root_includes.go similarity index 99% rename from bundle/config/mutator/process_root_includes.go rename to bundle/config/loader/process_root_includes.go index c5e0a22c5..25f284fd3 100644 --- a/bundle/config/mutator/process_root_includes.go +++ b/bundle/config/loader/process_root_includes.go @@ -1,4 +1,4 @@ -package mutator +package loader import ( "context" diff --git 
a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/loader/process_root_includes_test.go similarity index 80% rename from bundle/config/mutator/process_root_includes_test.go rename to bundle/config/loader/process_root_includes_test.go index 675dd9acf..737dbbefd 100644 --- a/bundle/config/mutator/process_root_includes_test.go +++ b/bundle/config/loader/process_root_includes_test.go @@ -1,4 +1,4 @@ -package mutator_test +package loader_test import ( "context" @@ -7,7 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/loader" "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,7 +17,7 @@ func TestProcessRootIncludesEmpty(t *testing.T) { b := &bundle.Bundle{ RootPath: ".", } - diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) require.NoError(t, diags.Error()) } @@ -37,7 +37,7 @@ func TestProcessRootIncludesAbs(t *testing.T) { }, }, } - diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) require.True(t, diags.HasError()) assert.ErrorContains(t, diags.Error(), "must be relative paths") } @@ -56,7 +56,7 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) { testutil.Touch(t, b.RootPath, "a.yml") testutil.Touch(t, b.RootPath, "b.yml") - diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) require.NoError(t, diags.Error()) assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include) } @@ -75,7 +75,7 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) { testutil.Touch(t, b.RootPath, "a1.yml") testutil.Touch(t, b.RootPath, "b1.yml") - diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) require.NoError(t, diags.Error()) assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include) } @@ -93,7 +93,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) { testutil.Touch(t, b.RootPath, "a.yml") - diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) require.NoError(t, diags.Error()) assert.Equal(t, []string{"a.yml"}, b.Config.Include) } @@ -107,7 +107,7 @@ func TestProcessRootIncludesNotExists(t *testing.T) { }, }, } - diags := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) require.True(t, diags.HasError()) assert.ErrorContains(t, diags.Error(), "notexist.yml defined in 'include' section does not match any files") } diff --git a/bundle/config/loader/testdata/databricks.yml b/bundle/config/loader/testdata/databricks.yml new file mode 100644 index 000000000..1a0635b89 --- /dev/null +++ b/bundle/config/loader/testdata/databricks.yml @@ -0,0 +1,2 @@ +bundle: + name: loader_test diff --git a/bundle/config/loader/testdata/host.yml b/bundle/config/loader/testdata/host.yml new file mode 100644 index 000000000..f83830d1d --- /dev/null +++ b/bundle/config/loader/testdata/host.yml @@ -0,0 +1,2 @@ +workspace: + host: bar diff --git a/bundle/config/mutator/mutator.go 
b/bundle/config/mutator/mutator.go index c45a6c15e..99b7e9ac9 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -3,13 +3,17 @@ package mutator import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/loader" "github.com/databricks/cli/bundle/scripts" ) func DefaultMutators() []bundle.Mutator { return []bundle.Mutator{ + loader.EntryPoint(), + loader.ProcessRootIncludes(), + + // Execute preinit script after loading all configuration files. scripts.Execute(config.ScriptPreInit), - ProcessRootIncludes(), EnvironmentsToTargets(), InitializeVariables(), DefineDefaultTarget(), diff --git a/bundle/phases/load.go b/bundle/phases/load.go new file mode 100644 index 000000000..fa0668775 --- /dev/null +++ b/bundle/phases/load.go @@ -0,0 +1,29 @@ +package phases + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" +) + +// The load phase loads configuration from disk and performs +// lightweight preprocessing (anything that can be done without network I/O). +func Load() bundle.Mutator { + return newPhase( + "load", + mutator.DefaultMutators(), + ) +} + +func LoadDefaultTarget() bundle.Mutator { + return newPhase( + "load", + append(mutator.DefaultMutators(), mutator.SelectDefaultTarget()), + ) +} + +func LoadNamedTarget(target string) bundle.Mutator { + return newPhase( + "load", + append(mutator.DefaultMutators(), mutator.SelectTarget(target)), + ) +} diff --git a/bundle/tests/conflicting_resource_ids_test.go b/bundle/tests/conflicting_resource_ids_test.go index 16dd1c33a..e7f0aa28f 100644 --- a/bundle/tests/conflicting_resource_ids_test.go +++ b/bundle/tests/conflicting_resource_ids_test.go @@ -7,23 +7,25 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/phases" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestConflictingResourceIdsNoSubconfig(t *testing.T) { ctx := context.Background() - _, err := bundle.Load(ctx, "./conflicting_resource_ids/no_subconfigurations") + b, err := bundle.Load(ctx, "./conflicting_resource_ids/no_subconfigurations") + require.NoError(t, err) + diags := bundle.Apply(ctx, b, phases.Load()) bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/no_subconfigurations/databricks.yml") - assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, bundleConfigPath)) + assert.ErrorContains(t, diags.Error(), fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, bundleConfigPath)) } func TestConflictingResourceIdsOneSubconfig(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, "./conflicting_resource_ids/one_subconfiguration") require.NoError(t, err) - diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + diags := bundle.Apply(ctx, b, phases.Load()) bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/databricks.yml") resourcesConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/resources.yml") assert.ErrorContains(t, diags.Error(), fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, resourcesConfigPath)) @@ -33,7 +35,7 @@ func TestConflictingResourceIdsTwoSubconfigs(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, 
"./conflicting_resource_ids/two_subconfigurations") require.NoError(t, err) - diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + diags := bundle.Apply(ctx, b, phases.Load()) resources1ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources1.yml") resources2ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources2.yml") assert.ErrorContains(t, diags.Error(), fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", resources1ConfigPath, resources2ConfigPath)) diff --git a/bundle/tests/include_test.go b/bundle/tests/include_test.go index fd8ae7198..5b0235f60 100644 --- a/bundle/tests/include_test.go +++ b/bundle/tests/include_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/phases" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -17,7 +17,7 @@ func TestIncludeInvalid(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, "./include_invalid") require.NoError(t, err) - diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + diags := bundle.Apply(ctx, b, phases.Load()) require.Error(t, diags.Error()) assert.ErrorContains(t, diags.Error(), "notexists.yml defined in 'include' section does not match any files") } diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index 228763ce9..e7cf18f73 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/phases" "github.com/stretchr/testify/require" ) @@ -13,7 +14,7 @@ func load(t *testing.T, path string) *bundle.Bundle { ctx := context.Background() b, err := bundle.Load(ctx, path) require.NoError(t, err) - diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + diags := bundle.Apply(ctx, b, phases.Load()) require.NoError(t, diags.Error()) return b } @@ -22,9 +23,8 @@ func loadTarget(t *testing.T, path, env string) *bundle.Bundle { ctx := context.Background() b, err := bundle.Load(ctx, path) require.NoError(t, err) - diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutatorsForTarget(env)...)) - require.NoError(t, diags.Error()) - diags = bundle.Apply(ctx, b, bundle.Seq( + diags := bundle.Apply(ctx, b, bundle.Seq( + phases.LoadNamedTarget(env), mutator.RewriteSyncPaths(), mutator.MergeJobClusters(), mutator.MergeJobTasks(), diff --git a/bundle/tests/python_wheel_test.go b/bundle/tests/python_wheel_test.go index 412b507fe..e2266516a 100644 --- a/bundle/tests/python_wheel_test.go +++ b/bundle/tests/python_wheel_test.go @@ -16,8 +16,7 @@ func TestPythonWheelBuild(t *testing.T) { b, err := bundle.Load(ctx, "./python_wheel/python_wheel") require.NoError(t, err) - m := phases.Build() - diags := bundle.Apply(ctx, b, m) + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) require.NoError(t, diags.Error()) matches, err := filepath.Glob("./python_wheel/python_wheel/my_test_code/dist/my_test_code-*.whl") @@ -34,8 +33,7 @@ func TestPythonWheelBuildAutoDetect(t *testing.T) { b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact") require.NoError(t, err) - m := phases.Build() - diags := bundle.Apply(ctx, b, m) + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) require.NoError(t, diags.Error()) matches, err := 
filepath.Glob("./python_wheel/python_wheel_no_artifact/dist/my_test_code-*.whl") @@ -52,8 +50,7 @@ func TestPythonWheelWithDBFSLib(t *testing.T) { b, err := bundle.Load(ctx, "./python_wheel/python_wheel_dbfs_lib") require.NoError(t, err) - m := phases.Build() - diags := bundle.Apply(ctx, b, m) + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) require.NoError(t, diags.Error()) match := libraries.MatchWithArtifacts() @@ -66,8 +63,7 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact_no_setup") require.NoError(t, err) - m := phases.Build() - diags := bundle.Apply(ctx, b, m) + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) require.NoError(t, diags.Error()) match := libraries.MatchWithArtifacts() diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index a3dec491d..97412ff69 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -40,8 +40,12 @@ func emptyCommand(t *testing.T) *cobra.Command { func setup(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle { setupDatabricksCfg(t) + rootPath := t.TempDir() + testutil.Touch(t, rootPath, "databricks.yml") + err := configureBundle(cmd, []string{"validate"}, func(_ context.Context) (*bundle.Bundle, error) { return &bundle.Bundle{ + RootPath: rootPath, Config: config.Root{ Bundle: config.Bundle{ Name: "test", diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index cad58a532..a8678a525 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -14,7 +14,6 @@ import ( "github.com/databricks/cli/bundle" bundleConfig "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/diag" @@ -66,23 +65,25 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri require.NoError(t, err) err = renderer.persistToDisk() require.NoError(t, err) + b, err := bundle.Load(ctx, filepath.Join(tempDir, "template", "my_project")) require.NoError(t, err) + diags := bundle.Apply(ctx, b, phases.LoadNamedTarget(target)) + require.NoError(t, diags.Error()) // Apply initialize / validation mutators bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { b.Config.Workspace.CurrentUser = &bundleConfig.User{User: cachedUser} + b.Config.Bundle.Terraform = &bundleConfig.Terraform{ + ExecPath: "sh", + } return nil }) b.Tagging = tags.ForCloud(w.Config) b.WorkspaceClient() - b.Config.Bundle.Terraform = &bundleConfig.Terraform{ - ExecPath: "sh", - } - diags := bundle.Apply(ctx, b, bundle.Seq( - bundle.Seq(mutator.DefaultMutators()...), - mutator.SelectTarget(target), + + diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), )) require.NoError(t, diags.Error()) From 704d06945975a901cb16328337ac3a24e81a6a6d Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 27 Mar 2024 19:07:59 +0530 Subject: [PATCH 102/286] Make `bundle.deployment` optional in the bundle schema (#1321) ## Changes Makes the field optional by adding the `omitempty` tag. This gets rid of the red squiggly lines in the bundle schema. 
--- bundle/config/bundle.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle/config/bundle.go b/bundle/config/bundle.go index 21278151f..6f991e562 100644 --- a/bundle/config/bundle.go +++ b/bundle/config/bundle.go @@ -42,5 +42,5 @@ type Bundle struct { ComputeID string `json:"compute_id,omitempty"` // Deployment section specifies deployment related configuration for bundle - Deployment Deployment `json:"deployment"` + Deployment Deployment `json:"deployment,omitempty"` } From 5df4c7e134c563bf2ff6c9ac6fbea61511876180 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 27 Mar 2024 21:43:53 +0530 Subject: [PATCH 103/286] Add allow list for resources when bundle `run_as` is set (#1233) ## Changes This PR introduces an allow list for resource types that are allowed when the run_as for the bundle is not the same as the current deployment user. This PR also adds a test to ensure that any new resources added to DABs will have to add the resource to either the allow list or add an error to fail when run_as identity is not the same as deployment user. ## Tests Unit tests --- bundle/config/mutator/run_as.go | 117 ++++++++--- bundle/config/mutator/run_as_test.go | 188 ++++++++++++++++++ bundle/config/root.go | 11 + .../tests/run_as/{ => allowed}/databricks.yml | 26 ++- .../both_sp_and_user/databricks.yml | 17 ++ .../not_allowed/model_serving/databricks.yml | 15 ++ .../neither_sp_nor_user/databricks.yml | 4 + .../databricks.yml | 8 + .../neither_sp_nor_user_override/override.yml | 4 + .../not_allowed/pipelines/databricks.yml | 25 +++ bundle/tests/run_as_test.go | 176 ++++++++++++++-- 11 files changed, 534 insertions(+), 57 deletions(-) create mode 100644 bundle/config/mutator/run_as_test.go rename bundle/tests/run_as/{ => allowed}/databricks.yml (70%) create mode 100644 bundle/tests/run_as/not_allowed/both_sp_and_user/databricks.yml create mode 100644 bundle/tests/run_as/not_allowed/model_serving/databricks.yml create mode 100644 bundle/tests/run_as/not_allowed/neither_sp_nor_user/databricks.yml create mode 100644 bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/databricks.yml create mode 100644 bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/override.yml create mode 100644 bundle/tests/run_as/not_allowed/pipelines/databricks.yml diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 243f8ef7d..578591eb1 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -2,20 +2,24 @@ package mutator import ( "context" - "slices" + "fmt" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/jobs" ) type setRunAs struct { } -// SetRunAs mutator is used to go over defined resources such as Jobs and DLT Pipelines -// And set correct execution identity ("run_as" for a job or "is_owner" permission for DLT) -// if top-level "run-as" section is defined in the configuration. +// This mutator does two things: +// +// 1. Sets the run_as field for jobs to the value of the run_as field in the bundle. +// +// 2. Validates that the bundle run_as configuration is valid in the context of the bundle. +// If the run_as user is different from the current deployment user, DABs only +// supports a subset of resources. 
func SetRunAs() bundle.Mutator { return &setRunAs{} } @@ -24,12 +28,94 @@ func (m *setRunAs) Name() string { return "SetRunAs" } +type errUnsupportedResourceTypeForRunAs struct { + resourceType string + resourceLocation dyn.Location + currentUser string + runAsUser string +} + +// TODO(6 March 2024): Link the docs page describing run_as semantics in the error below +// once the page is ready. +func (e errUnsupportedResourceTypeForRunAs) Error() string { + return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser) +} + +type errBothSpAndUserSpecified struct { + spName string + spLoc dyn.Location + userName string + userLoc dyn.Location +} + +func (e errBothSpAndUserSpecified) Error() string { + return fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name %q is specified at %s. A user_name %q is defined at %s", e.spName, e.spLoc, e.userName, e.userLoc) +} + +func validateRunAs(b *bundle.Bundle) error { + runAs := b.Config.RunAs + + // Error if neither service_principal_name nor user_name are specified + if runAs.ServicePrincipalName == "" && runAs.UserName == "" { + return fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as")) + } + + // Error if both service_principal_name and user_name are specified + if runAs.UserName != "" && runAs.ServicePrincipalName != "" { + return errBothSpAndUserSpecified{ + spName: runAs.ServicePrincipalName, + userName: runAs.UserName, + spLoc: b.Config.GetLocation("run_as.service_principal_name"), + userLoc: b.Config.GetLocation("run_as.user_name"), + } + } + + identity := runAs.ServicePrincipalName + if identity == "" { + identity = runAs.UserName + } + + // All resources are supported if the run_as identity is the same as the current deployment identity. + if identity == b.Config.Workspace.CurrentUser.UserName { + return nil + } + + // DLT pipelines do not support run_as in the API. + if len(b.Config.Resources.Pipelines) > 0 { + return errUnsupportedResourceTypeForRunAs{ + resourceType: "pipelines", + resourceLocation: b.Config.GetLocation("resources.pipelines"), + currentUser: b.Config.Workspace.CurrentUser.UserName, + runAsUser: identity, + } + } + + // Model serving endpoints do not support run_as in the API. 
+ if len(b.Config.Resources.ModelServingEndpoints) > 0 { + return errUnsupportedResourceTypeForRunAs{ + resourceType: "model_serving_endpoints", + resourceLocation: b.Config.GetLocation("resources.model_serving_endpoints"), + currentUser: b.Config.Workspace.CurrentUser.UserName, + runAsUser: identity, + } + } + + return nil +} + func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + // Mutator is a no-op if run_as is not specified in the bundle runAs := b.Config.RunAs if runAs == nil { return nil } + // Assert the run_as configuration is valid in the context of the bundle + if err := validateRunAs(b); err != nil { + return diag.FromErr(err) + } + + // Set run_as for jobs for i := range b.Config.Resources.Jobs { job := b.Config.Resources.Jobs[i] if job.RunAs != nil { @@ -41,26 +127,5 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { } } - me := b.Config.Workspace.CurrentUser.UserName - // If user deploying the bundle and the one defined in run_as are the same - // Do not add IS_OWNER permission. Current user is implied to be an owner in this case. - // Otherwise, it will fail due to this bug https://github.com/databricks/terraform-provider-databricks/issues/2407 - if runAs.UserName == me || runAs.ServicePrincipalName == me { - return nil - } - - for i := range b.Config.Resources.Pipelines { - pipeline := b.Config.Resources.Pipelines[i] - pipeline.Permissions = slices.DeleteFunc(pipeline.Permissions, func(p resources.Permission) bool { - return (runAs.ServicePrincipalName != "" && p.ServicePrincipalName == runAs.ServicePrincipalName) || - (runAs.UserName != "" && p.UserName == runAs.UserName) - }) - pipeline.Permissions = append(pipeline.Permissions, resources.Permission{ - Level: "IS_OWNER", - ServicePrincipalName: runAs.ServicePrincipalName, - UserName: runAs.UserName, - }) - } - return nil } diff --git a/bundle/config/mutator/run_as_test.go b/bundle/config/mutator/run_as_test.go new file mode 100644 index 000000000..d6fb2939f --- /dev/null +++ b/bundle/config/mutator/run_as_test.go @@ -0,0 +1,188 @@ +package mutator + +import ( + "context" + "slices" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func allResourceTypes(t *testing.T) []string { + // Compute supported resource types based on the `Resources{}` struct. + r := config.Resources{} + rv, err := convert.FromTyped(r, dyn.NilValue) + require.NoError(t, err) + normalized, _ := convert.Normalize(r, rv, convert.IncludeMissingFields) + resourceTypes := []string{} + for _, k := range normalized.MustMap().Keys() { + resourceTypes = append(resourceTypes, k.MustString()) + } + slices.Sort(resourceTypes) + + // Assert the total list of resource supported, as a sanity check that using + // the dyn library gives us the correct list of all resources supported. 
Please + // also update this check when adding a new resource + require.Equal(t, []string{ + "experiments", + "jobs", + "model_serving_endpoints", + "models", + "pipelines", + "registered_models", + }, + resourceTypes, + ) + + return resourceTypes +} + +func TestRunAsWorksForAllowedResources(t *testing.T) { + config := config.Root{ + Workspace: config.Workspace{ + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "alice", + }, + }, + }, + RunAs: &jobs.JobRunAs{ + UserName: "bob", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job_one": { + JobSettings: &jobs.JobSettings{ + Name: "foo", + }, + }, + "job_two": { + JobSettings: &jobs.JobSettings{ + Name: "bar", + }, + }, + "job_three": { + JobSettings: &jobs.JobSettings{ + Name: "baz", + }, + }, + }, + Models: map[string]*resources.MlflowModel{ + "model_one": {}, + }, + RegisteredModels: map[string]*resources.RegisteredModel{ + "registered_model_one": {}, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment_one": {}, + }, + }, + } + + b := &bundle.Bundle{ + Config: config, + } + + diags := bundle.Apply(context.Background(), b, SetRunAs()) + assert.NoError(t, diags.Error()) + + for _, job := range b.Config.Resources.Jobs { + assert.Equal(t, "bob", job.RunAs.UserName) + } +} + +func TestRunAsErrorForUnsupportedResources(t *testing.T) { + // Bundle "run_as" has two modes of operation, each with a different set of + // resources that are supported. + // Cases: + // 1. When the bundle "run_as" identity is same as the current deployment + // identity. In this case all resources are supported. + // 2. When the bundle "run_as" identity is different from the current + // deployment identity. In this case only a subset of resources are + // supported. This subset of resources are defined in the allow list below. + // + // To be a part of the allow list, the resource must satisfy one of the following + // two conditions: + // 1. The resource supports setting a run_as identity to a different user + // from the owner/creator of the resource. For example, jobs. + // 2. Run as semantics do not apply to the resource. We do not plan to add + // platform side support for `run_as` for these resources. For example, + // experiments or registered models. + // + // Any resource that is not on the allow list cannot be used when the bundle + // run_as is different from the current deployment user. "bundle validate" must + // return an error if such a resource has been defined, and the run_as identity + // is different from the current deployment identity. + // + // Action Item: If you are adding a new resource to DABs, please check in with + // the relevant owning team whether the resource should be on the allow list or (implicitly) on + // the deny list. Any resources that could have run_as semantics in the future + // should be on the deny list. + // For example: Teams for pipelines, model serving endpoints or Lakeview dashboards + // are planning to add platform side support for `run_as` for these resources at + // some point in the future. These resources are (implicitly) on the deny list, since + // they are not on the allow list below. 
+ allowList := []string{ + "jobs", + "models", + "registered_models", + "experiments", + } + + base := config.Root{ + Workspace: config.Workspace{ + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "alice", + }, + }, + }, + RunAs: &jobs.JobRunAs{ + UserName: "bob", + }, + } + + v, err := convert.FromTyped(base, dyn.NilValue) + require.NoError(t, err) + + for _, rt := range allResourceTypes(t) { + // Skip allowed resources + if slices.Contains(allowList, rt) { + continue + } + + // Add an instance of the resource type that is not on the allow list to + // the bundle configuration. + nv, err := dyn.SetByPath(v, dyn.NewPath(dyn.Key("resources"), dyn.Key(rt)), dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "path": dyn.V("bar"), + }), + })) + require.NoError(t, err) + + // Get back typed configuration from the newly created invalid bundle configuration. + r := &config.Root{} + err = convert.ToTyped(r, nv) + require.NoError(t, err) + + // Assert this invalid bundle configuration fails validation. + b := &bundle.Bundle{ + Config: *r, + } + diags := bundle.Apply(context.Background(), b, SetRunAs()) + assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{ + resourceType: rt, + resourceLocation: dyn.Location{}, + currentUser: "alice", + runAsUser: "bob", + }.Error(), "expected run_as with a different identity than the current deployment user to not supported for resources of type: %s", rt) + } +} diff --git a/bundle/config/root.go b/bundle/config/root.go index a3dd0d28b..0e54c04ce 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -448,3 +448,14 @@ func validateVariableOverrides(root, target dyn.Value) (err error) { return nil } + +// Best effort to get the location of configuration value at the specified path. +// This function is useful to annotate error messages with the location, because +// we don't want to fail with a different error message if we cannot retrieve the location. 
+func (r *Root) GetLocation(path string) dyn.Location { + v, err := dyn.Get(r.value, path) + if err != nil { + return dyn.Location{} + } + return v.Location() +} diff --git a/bundle/tests/run_as/databricks.yml b/bundle/tests/run_as/allowed/databricks.yml similarity index 70% rename from bundle/tests/run_as/databricks.yml rename to bundle/tests/run_as/allowed/databricks.yml index 1cdc9e44b..6cb9cd5a4 100644 --- a/bundle/tests/run_as/databricks.yml +++ b/bundle/tests/run_as/allowed/databricks.yml @@ -11,20 +11,6 @@ targets: user_name: "my_user_name" resources: - pipelines: - nyc_taxi_pipeline: - name: "nyc taxi loader" - - permissions: - - level: CAN_VIEW - service_principal_name: my_service_principal - - level: CAN_VIEW - user_name: my_user_name - - libraries: - - notebook: - path: ./dlt/nyc_taxi_loader - jobs: job_one: name: Job One @@ -52,3 +38,15 @@ resources: - task_key: "task_three" notebook_task: notebook_path: "./test.py" + + models: + model_one: + name: "skynet" + + registered_models: + model_two: + name: "skynet (in UC)" + + experiments: + experiment_one: + name: "experiment_one" diff --git a/bundle/tests/run_as/not_allowed/both_sp_and_user/databricks.yml b/bundle/tests/run_as/not_allowed/both_sp_and_user/databricks.yml new file mode 100644 index 000000000..dfab50e94 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/both_sp_and_user/databricks.yml @@ -0,0 +1,17 @@ +bundle: + name: "run_as" + +# This is not allowed because both service_principal_name and user_name are set +run_as: + service_principal_name: "my_service_principal" + user_name: "my_user_name" + +resources: + jobs: + job_one: + name: Job One + + tasks: + - task_key: "task_one" + notebook_task: + notebook_path: "./test.py" diff --git a/bundle/tests/run_as/not_allowed/model_serving/databricks.yml b/bundle/tests/run_as/not_allowed/model_serving/databricks.yml new file mode 100644 index 000000000..cdd7e0913 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/model_serving/databricks.yml @@ -0,0 +1,15 @@ +bundle: + name: "run_as" + +run_as: + service_principal_name: "my_service_principal" + +targets: + development: + run_as: + user_name: "my_user_name" + +resources: + model_serving_endpoints: + foo: + name: "skynet" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/databricks.yml new file mode 100644 index 000000000..a328fbd8c --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/databricks.yml @@ -0,0 +1,4 @@ +bundle: + name: "abc" + +run_as: diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/databricks.yml new file mode 100644 index 000000000..f7c1d728d --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/databricks.yml @@ -0,0 +1,8 @@ +bundle: + name: "abc" + +run_as: + user_name: "my_user_name" + +include: + - ./override.yml diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/override.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/override.yml new file mode 100644 index 000000000..d093e4c95 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/override.yml @@ -0,0 +1,4 @@ +targets: + development: + default: true + run_as: diff --git a/bundle/tests/run_as/not_allowed/pipelines/databricks.yml b/bundle/tests/run_as/not_allowed/pipelines/databricks.yml new file mode 100644 index 000000000..d59c34ab6 --- /dev/null +++ 
b/bundle/tests/run_as/not_allowed/pipelines/databricks.yml @@ -0,0 +1,25 @@ +bundle: + name: "run_as" + +run_as: + service_principal_name: "my_service_principal" + +targets: + development: + run_as: + user_name: "my_user_name" + +resources: + pipelines: + nyc_taxi_pipeline: + name: "nyc taxi loader" + + permissions: + - level: CAN_VIEW + service_principal_name: my_service_principal + - level: CAN_VIEW + user_name: my_user_name + + libraries: + - notebook: + path: ./dlt/nyc_taxi_loader diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 321bb5130..3b9deafe0 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -2,18 +2,22 @@ package config_tests import ( "context" + "fmt" + "path/filepath" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/ml" "github.com/stretchr/testify/assert" ) -func TestRunAsDefault(t *testing.T) { - b := load(t, "./run_as") +func TestRunAsForAllowed(t *testing.T) { + b := load(t, "./run_as/allowed") ctx := context.Background() bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { @@ -31,6 +35,7 @@ func TestRunAsDefault(t *testing.T) { assert.Len(t, b.Config.Resources.Jobs, 3) jobs := b.Config.Resources.Jobs + // job_one and job_two should have the same run_as identity as the bundle. assert.NotNil(t, jobs["job_one"].RunAs) assert.Equal(t, "my_service_principal", jobs["job_one"].RunAs.ServicePrincipalName) assert.Equal(t, "", jobs["job_one"].RunAs.UserName) @@ -39,21 +44,19 @@ func TestRunAsDefault(t *testing.T) { assert.Equal(t, "my_service_principal", jobs["job_two"].RunAs.ServicePrincipalName) assert.Equal(t, "", jobs["job_two"].RunAs.UserName) + // job_three should retain the job level run_as identity. assert.NotNil(t, jobs["job_three"].RunAs) assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName) assert.Equal(t, "", jobs["job_three"].RunAs.UserName) - pipelines := b.Config.Resources.Pipelines - assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) - assert.Equal(t, "CAN_VIEW", pipelines["nyc_taxi_pipeline"].Permissions[0].Level) - assert.Equal(t, "my_user_name", pipelines["nyc_taxi_pipeline"].Permissions[0].UserName) - - assert.Equal(t, "IS_OWNER", pipelines["nyc_taxi_pipeline"].Permissions[1].Level) - assert.Equal(t, "my_service_principal", pipelines["nyc_taxi_pipeline"].Permissions[1].ServicePrincipalName) + // Assert other resources are not affected. 
+ assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) + assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) + assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) } -func TestRunAsDevelopment(t *testing.T) { - b := loadTarget(t, "./run_as", "development") +func TestRunAsForAllowedWithTargetOverride(t *testing.T) { + b := loadTarget(t, "./run_as/allowed", "development") ctx := context.Background() bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { @@ -71,6 +74,8 @@ func TestRunAsDevelopment(t *testing.T) { assert.Len(t, b.Config.Resources.Jobs, 3) jobs := b.Config.Resources.Jobs + // job_one and job_two should have the same run_as identity as the bundle's + // development target. assert.NotNil(t, jobs["job_one"].RunAs) assert.Equal(t, "", jobs["job_one"].RunAs.ServicePrincipalName) assert.Equal(t, "my_user_name", jobs["job_one"].RunAs.UserName) @@ -79,15 +84,152 @@ func TestRunAsDevelopment(t *testing.T) { assert.Equal(t, "", jobs["job_two"].RunAs.ServicePrincipalName) assert.Equal(t, "my_user_name", jobs["job_two"].RunAs.UserName) + // job_three should retain the job level run_as identity. assert.NotNil(t, jobs["job_three"].RunAs) assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName) assert.Equal(t, "", jobs["job_three"].RunAs.UserName) - pipelines := b.Config.Resources.Pipelines - assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) - assert.Equal(t, "CAN_VIEW", pipelines["nyc_taxi_pipeline"].Permissions[0].Level) - assert.Equal(t, "my_service_principal", pipelines["nyc_taxi_pipeline"].Permissions[0].ServicePrincipalName) + // Assert other resources are not affected. + assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) + assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) + assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) - assert.Equal(t, "IS_OWNER", pipelines["nyc_taxi_pipeline"].Permissions[1].Level) - assert.Equal(t, "my_user_name", pipelines["nyc_taxi_pipeline"].Permissions[1].UserName) +} + +func TestRunAsErrorForPipelines(t *testing.T) { + b := load(t, "./run_as/not_allowed/pipelines") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/pipelines/databricks.yml") + assert.EqualError(t, err, fmt.Sprintf("pipelines are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) +} + +func TestRunAsNoErrorForPipelines(t *testing.T) { + b := load(t, "./run_as/not_allowed/pipelines") + + // We should not error because the pipeline is being deployed with the same + // identity as the bundle run_as identity. 
+ ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) +} + +func TestRunAsErrorForModelServing(t *testing.T) { + b := load(t, "./run_as/not_allowed/model_serving") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/model_serving/databricks.yml") + assert.EqualError(t, err, fmt.Sprintf("model_serving_endpoints are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) +} + +func TestRunAsNoErrorForModelServingEndpoints(t *testing.T) { + b := load(t, "./run_as/not_allowed/model_serving") + + // We should not error because the model serving endpoint is being deployed + // with the same identity as the bundle run_as identity. + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) +} + +func TestRunAsErrorWhenBothUserAndSpSpecified(t *testing.T) { + b := load(t, "./run_as/not_allowed/both_sp_and_user") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/both_sp_and_user/databricks.yml") + assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name \"my_service_principal\" is specified at %s:6:27. A user_name \"my_user_name\" is defined at %s:7:14", configPath, configPath)) +} + +func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { + b := load(t, "./run_as/not_allowed/neither_sp_nor_user") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/databricks.yml") + assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. 
Neither service_principal_name nor user_name is specified at %s:4:8", configPath)) +} + +func TestRunAsErrorNeitherUserOrSpSpecifiedAtTargetOverride(t *testing.T) { + b := loadTarget(t, "./run_as/not_allowed/neither_sp_nor_user_override", "development") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user_override/override.yml") + assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:12", configPath)) } From b21e3c81cdcd7462b51139b86193d367932410f4 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 28 Mar 2024 11:32:34 +0100 Subject: [PATCH 104/286] Make bundle loaders return diagnostics (#1319) ## Changes The function signature of Cobra's `PreRunE` function has an `error` return value. We'd like to start returning `diag.Diagnostics` after loading a bundle, so this is incompatible. This change modifies all usage of `PreRunE` to load a bundle to inline function calls in the command's `RunE` function. ## Tests * Unit tests pass. * Integration tests pass. --- cmd/bundle/deploy.go | 14 +-- cmd/bundle/deployment/bind.go | 15 +-- cmd/bundle/deployment/unbind.go | 15 +-- cmd/bundle/destroy.go | 14 +-- cmd/bundle/generate.go | 8 +- cmd/bundle/generate/job.go | 13 +-- cmd/bundle/generate/pipeline.go | 13 +-- cmd/bundle/launch.go | 2 - cmd/bundle/run.go | 19 ++-- cmd/bundle/summary.go | 21 ++-- cmd/bundle/sync.go | 11 +- cmd/bundle/test.go | 3 - cmd/bundle/utils/utils.go | 29 ++++-- cmd/bundle/validate.go | 15 +-- cmd/labs/project/entrypoint.go | 7 +- cmd/root/auth.go | 7 +- cmd/root/bundle.go | 121 ++++++++++++---------- cmd/root/bundle_test.go | 177 +++++++++++++++++++++++--------- 18 files changed, 305 insertions(+), 199 deletions(-) diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 8b8cb9f2e..919b15a72 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -13,10 +13,9 @@ import ( func newDeployCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "deploy", - Short: "Deploy bundle", - Args: root.NoArgs, - PreRunE: utils.ConfigureBundleWithVariables, + Use: "deploy", + Short: "Deploy bundle", + Args: root.NoArgs, } var force bool @@ -30,7 +29,10 @@ func newDeployCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { b.Config.Bundle.Force = force @@ -46,7 +48,7 @@ func newDeployCommand() *cobra.Command { return nil }) - diags := bundle.Apply(ctx, b, bundle.Seq( + diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Build(), phases.Deploy(), diff --git a/cmd/bundle/deployment/bind.go b/cmd/bundle/deployment/bind.go index 11c560b12..71f441d3d 100644 --- a/cmd/bundle/deployment/bind.go +++ b/cmd/bundle/deployment/bind.go @@ -16,10 +16,9 @@ import ( func newBindCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "bind KEY RESOURCE_ID", - Short: "Bind bundle-defined resources to existing resources", - Args: root.ExactArgs(2), - 
PreRunE: utils.ConfigureBundleWithVariables, + Use: "bind KEY RESOURCE_ID", + Short: "Bind bundle-defined resources to existing resources", + Args: root.ExactArgs(2), } var autoApprove bool @@ -29,7 +28,11 @@ func newBindCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + resource, err := b.Config.Resources.FindResourceByConfigKey(args[0]) if err != nil { return err @@ -50,7 +53,7 @@ func newBindCommand() *cobra.Command { return nil }) - diags := bundle.Apply(ctx, b, bundle.Seq( + diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Bind(&terraform.BindOptions{ AutoApprove: autoApprove, diff --git a/cmd/bundle/deployment/unbind.go b/cmd/bundle/deployment/unbind.go index 76727877f..9de5285a5 100644 --- a/cmd/bundle/deployment/unbind.go +++ b/cmd/bundle/deployment/unbind.go @@ -13,10 +13,9 @@ import ( func newUnbindCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "unbind KEY", - Short: "Unbind bundle-defined resources from its managed remote resource", - Args: root.ExactArgs(1), - PreRunE: utils.ConfigureBundleWithVariables, + Use: "unbind KEY", + Short: "Unbind bundle-defined resources from its managed remote resource", + Args: root.ExactArgs(1), } var forceLock bool @@ -24,7 +23,11 @@ func newUnbindCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + resource, err := b.Config.Resources.FindResourceByConfigKey(args[0]) if err != nil { return err @@ -35,7 +38,7 @@ func newUnbindCommand() *cobra.Command { return nil }) - diags := bundle.Apply(cmd.Context(), b, bundle.Seq( + diags = bundle.Apply(cmd.Context(), b, bundle.Seq( phases.Initialize(), phases.Unbind(resource.TerraformResourceName(), args[0]), )) diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index 38b717713..cd7e63062 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -18,10 +18,9 @@ import ( func newDestroyCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "destroy", - Short: "Destroy deployed bundle resources", - Args: root.NoArgs, - PreRunE: utils.ConfigureBundleWithVariables, + Use: "destroy", + Short: "Destroy deployed bundle resources", + Args: root.NoArgs, } var autoApprove bool @@ -31,7 +30,10 @@ func newDestroyCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // If `--force-lock` is specified, force acquisition of the deployment lock. 
@@ -58,7 +60,7 @@ func newDestroyCommand() *cobra.Command { return fmt.Errorf("please specify --auto-approve since selected logging format is json") } - diags := bundle.Apply(ctx, b, bundle.Seq( + diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Build(), phases.Destroy(), diff --git a/cmd/bundle/generate.go b/cmd/bundle/generate.go index 6c48b1586..1e3d56e43 100644 --- a/cmd/bundle/generate.go +++ b/cmd/bundle/generate.go @@ -2,7 +2,6 @@ package bundle import ( "github.com/databricks/cli/cmd/bundle/generate" - "github.com/databricks/cli/cmd/bundle/utils" "github.com/spf13/cobra" ) @@ -10,10 +9,9 @@ func newGenerateCommand() *cobra.Command { var key string cmd := &cobra.Command{ - Use: "generate", - Short: "Generate bundle configuration", - Long: "Generate bundle configuration", - PreRunE: utils.ConfigureBundleWithVariables, + Use: "generate", + Short: "Generate bundle configuration", + Long: "Generate bundle configuration", } cmd.AddCommand(generate.NewGenerateJobCommand()) diff --git a/cmd/bundle/generate/job.go b/cmd/bundle/generate/job.go index c5a94a8f6..99bc61660 100644 --- a/cmd/bundle/generate/job.go +++ b/cmd/bundle/generate/job.go @@ -5,7 +5,6 @@ import ( "os" "path/filepath" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/generate" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" @@ -24,9 +23,8 @@ func NewGenerateJobCommand() *cobra.Command { var force bool cmd := &cobra.Command{ - Use: "job", - Short: "Generate bundle configuration for a job", - PreRunE: root.MustConfigureBundle, + Use: "job", + Short: "Generate bundle configuration for a job", } cmd.Flags().Int64Var(&jobId, "existing-job-id", 0, `Job ID of the job to generate config for`) @@ -43,9 +41,12 @@ func NewGenerateJobCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) - w := b.WorkspaceClient() + b, diags := root.MustConfigureBundle(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + w := b.WorkspaceClient() job, err := w.Jobs.Get(ctx, jobs.GetJobRequest{JobId: jobId}) if err != nil { return err diff --git a/cmd/bundle/generate/pipeline.go b/cmd/bundle/generate/pipeline.go index 4c5fcf425..bd973fe0b 100644 --- a/cmd/bundle/generate/pipeline.go +++ b/cmd/bundle/generate/pipeline.go @@ -5,7 +5,6 @@ import ( "os" "path/filepath" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/generate" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" @@ -24,9 +23,8 @@ func NewGeneratePipelineCommand() *cobra.Command { var force bool cmd := &cobra.Command{ - Use: "pipeline", - Short: "Generate bundle configuration for a pipeline", - PreRunE: root.MustConfigureBundle, + Use: "pipeline", + Short: "Generate bundle configuration for a pipeline", } cmd.Flags().StringVar(&pipelineId, "existing-pipeline-id", "", `ID of the pipeline to generate config for`) @@ -43,9 +41,12 @@ func NewGeneratePipelineCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) - w := b.WorkspaceClient() + b, diags := root.MustConfigureBundle(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + w := b.WorkspaceClient() pipeline, err := w.Pipelines.Get(ctx, pipelines.GetPipelineRequest{PipelineId: pipelineId}) if err != nil { return err diff --git a/cmd/bundle/launch.go b/cmd/bundle/launch.go index f376ebdae..0d2b4233b 100644 --- 
a/cmd/bundle/launch.go +++ b/cmd/bundle/launch.go @@ -16,8 +16,6 @@ func newLaunchCommand() *cobra.Command { // We're not ready to expose this command until we specify its semantics. Hidden: true, - - PreRunE: root.MustConfigureBundle, } cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 87ea8610c..e6a8e1ba4 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -17,10 +17,9 @@ import ( func newRunCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "run [flags] KEY", - Short: "Run a resource (e.g. a job or a pipeline)", - Args: root.MaximumNArgs(1), - PreRunE: utils.ConfigureBundleWithVariables, + Use: "run [flags] KEY", + Short: "Run a resource (e.g. a job or a pipeline)", + Args: root.MaximumNArgs(1), } var runOptions run.Options @@ -33,9 +32,12 @@ func newRunCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } - diags := bundle.Apply(ctx, b, bundle.Seq( + diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), terraform.Interpolate(), terraform.Write(), @@ -109,15 +111,14 @@ func newRunCommand() *cobra.Command { return nil, cobra.ShellCompDirectiveNoFileComp } - err := root.MustConfigureBundle(cmd, args) - if err != nil { + b, diags := root.MustConfigureBundle(cmd) + if err := diags.Error(); err != nil { cobra.CompErrorln(err.Error()) return nil, cobra.ShellCompDirectiveError } // No completion in the context of a bundle. // Source and destination paths are taken from bundle configuration. - b := bundle.GetOrNil(cmd.Context()) if b == nil { return nil, cobra.ShellCompDirectiveNoFileComp } diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index a28ceede9..5a64b46c0 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -18,10 +18,9 @@ import ( func newSummaryCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "summary", - Short: "Describe the bundle resources and their deployment states", - Args: root.NoArgs, - PreRunE: utils.ConfigureBundleWithVariables, + Use: "summary", + Short: "Describe the bundle resources and their deployment states", + Args: root.NoArgs, // This command is currently intended for the Databricks VSCode extension only Hidden: true, @@ -31,14 +30,18 @@ func newSummaryCommand() *cobra.Command { cmd.Flags().BoolVar(&forcePull, "force-pull", false, "Skip local cache and load the state from the remote workspace") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } - diags := bundle.Apply(cmd.Context(), b, phases.Initialize()) + diags = bundle.Apply(ctx, b, phases.Initialize()) if err := diags.Error(); err != nil { return err } - cacheDir, err := terraform.Dir(cmd.Context(), b) + cacheDir, err := terraform.Dir(ctx, b) if err != nil { return err } @@ -47,7 +50,7 @@ func newSummaryCommand() *cobra.Command { noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist) if forcePull || noCache { - diags = bundle.Apply(cmd.Context(), b, bundle.Seq( + diags = bundle.Apply(ctx, b, bundle.Seq( terraform.StatePull(), terraform.Interpolate(), terraform.Write(), @@ -57,7 +60,7 @@ func newSummaryCommand() *cobra.Command { } } - diags = bundle.Apply(cmd.Context(), b, 
terraform.Load()) + diags = bundle.Apply(ctx, b, terraform.Load()) if err := diags.Error(); err != nil { return err } diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index 0b7f9b3a9..0818aecf7 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -36,8 +36,6 @@ func newSyncCommand() *cobra.Command { Use: "sync [flags]", Short: "Synchronize bundle tree to the workspace", Args: root.NoArgs, - - PreRunE: utils.ConfigureBundleWithVariables, } var f syncFlags @@ -46,10 +44,14 @@ func newSyncCommand() *cobra.Command { cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } // Run initialize phase to make sure paths are set. - diags := bundle.Apply(cmd.Context(), b, phases.Initialize()) + diags = bundle.Apply(ctx, b, phases.Initialize()) if err := diags.Error(); err != nil { return err } @@ -59,7 +61,6 @@ func newSyncCommand() *cobra.Command { return err } - ctx := cmd.Context() s, err := sync.New(ctx, *opts) if err != nil { return err diff --git a/cmd/bundle/test.go b/cmd/bundle/test.go index ea1a4b716..4d30e727d 100644 --- a/cmd/bundle/test.go +++ b/cmd/bundle/test.go @@ -3,7 +3,6 @@ package bundle import ( "fmt" - "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) @@ -15,8 +14,6 @@ func newTestCommand() *cobra.Command { // We're not ready to expose this command until we specify its semantics. Hidden: true, - - PreRunE: root.MustConfigureBundle, } cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bundle/utils/utils.go b/cmd/bundle/utils/utils.go index e53a40b9d..d585c6220 100644 --- a/cmd/bundle/utils/utils.go +++ b/cmd/bundle/utils/utils.go @@ -9,23 +9,30 @@ import ( "github.com/spf13/cobra" ) -func ConfigureBundleWithVariables(cmd *cobra.Command, args []string) error { +func configureVariables(cmd *cobra.Command, b *bundle.Bundle, variables []string) diag.Diagnostics { + return bundle.ApplyFunc(cmd.Context(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.InitializeVariables(variables) + return diag.FromErr(err) + }) +} + +func ConfigureBundleWithVariables(cmd *cobra.Command) (*bundle.Bundle, diag.Diagnostics) { // Load bundle config and apply target - err := root.MustConfigureBundle(cmd, args) - if err != nil { - return err + b, diags := root.MustConfigureBundle(cmd) + if diags.HasError() { + return nil, diags } variables, err := cmd.Flags().GetStringSlice("var") if err != nil { - return err + return nil, diag.FromErr(err) } // Initialize variables by assigning them values passed as command line flags - b := bundle.Get(cmd.Context()) - diags := bundle.ApplyFunc(cmd.Context(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - err := b.Config.InitializeVariables(variables) - return diag.FromErr(err) - }) - return diags.Error() + diags = diags.Extend(configureVariables(cmd, b, variables)) + if diags.HasError() { + return nil, diags + } + + return b, diags } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 42686b328..57bf6f7b9 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -13,16 +13,19 @@ import ( func newValidateCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "validate", - Short: "Validate configuration", - Args: root.NoArgs, - PreRunE: utils.ConfigureBundleWithVariables, + Use: 
"validate", + Short: "Validate configuration", + Args: root.NoArgs, } cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } - diags := bundle.Apply(cmd.Context(), b, phases.Initialize()) + diags = bundle.Apply(ctx, b, phases.Initialize()) if err := diags.Error(); err != nil { return err } diff --git a/cmd/labs/project/entrypoint.go b/cmd/labs/project/entrypoint.go index 96f46d4b5..99edf83c8 100644 --- a/cmd/labs/project/entrypoint.go +++ b/cmd/labs/project/entrypoint.go @@ -10,7 +10,6 @@ import ( "path/filepath" "strings" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" @@ -203,11 +202,11 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C return lc, cfg, nil } if e.IsBundleAware { - err = root.TryConfigureBundle(cmd, []string{}) - if err != nil { + b, diags := root.TryConfigureBundle(cmd) + if err := diags.Error(); err != nil { return nil, nil, fmt.Errorf("bundle: %w", err) } - if b := bundle.GetOrNil(cmd.Context()); b != nil { + if b != nil { log.Infof(ctx, "Using login configuration from Databricks Asset Bundle") return &loginConfig{}, b.WorkspaceClient().Config, nil } diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 89c7641c5..0edfaaa83 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -6,7 +6,6 @@ import ( "fmt" "net/http" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/databricks-sdk-go" @@ -149,11 +148,11 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { // Try to load a bundle configuration if we're allowed to by the caller (see `./auth_options.go`). if !shouldSkipLoadBundle(cmd.Context()) { - err := TryConfigureBundle(cmd, args) - if err != nil { + b, diags := TryConfigureBundle(cmd) + if err := diags.Error(); err != nil { return err } - if b := bundle.GetOrNil(cmd.Context()); b != nil { + if b != nil { client, err := b.InitializeWorkspaceClient() if err != nil { return err diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index 6a6aeb4d2..4ed89c57b 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -4,8 +4,8 @@ import ( "context" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/libs/diag" envlib "github.com/databricks/cli/libs/env" "github.com/spf13/cobra" @@ -50,87 +50,100 @@ func getProfile(cmd *cobra.Command) (value string) { return envlib.Get(cmd.Context(), "DATABRICKS_CONFIG_PROFILE") } -// loadBundle loads the bundle configuration and applies default mutators. -func loadBundle(cmd *cobra.Command, args []string, load func(ctx context.Context) (*bundle.Bundle, error)) (*bundle.Bundle, error) { - ctx := cmd.Context() - b, err := load(ctx) - if err != nil { - return nil, err - } - - // No bundle is fine in case of `TryConfigureBundle`. - if b == nil { - return nil, nil - } - +// configureProfile applies the profile flag to the bundle. 
+func configureProfile(cmd *cobra.Command, b *bundle.Bundle) diag.Diagnostics { profile := getProfile(cmd) - if profile != "" { - diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - b.Config.Workspace.Profile = profile - return nil - }) - if err := diags.Error(); err != nil { - return nil, err - } - } - - diags := bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) - if err := diags.Error(); err != nil { - return nil, err - } - - return b, nil -} - -// configureBundle loads the bundle configuration and configures it on the command's context. -func configureBundle(cmd *cobra.Command, args []string, load func(ctx context.Context) (*bundle.Bundle, error)) error { - b, err := loadBundle(cmd, args, load) - if err != nil { - return err - } - - // No bundle is fine in case of `TryConfigureBundle`. - if b == nil { + if profile == "" { return nil } + return bundle.ApplyFunc(cmd.Context(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.Profile = profile + return nil + }) +} + +// configureBundle loads the bundle configuration and configures flag values, if any. +func configureBundle(cmd *cobra.Command, b *bundle.Bundle) (*bundle.Bundle, diag.Diagnostics) { var m bundle.Mutator - env := getTarget(cmd) - if env == "" { - m = mutator.SelectDefaultTarget() + if target := getTarget(cmd); target == "" { + m = phases.LoadDefaultTarget() } else { - m = mutator.SelectTarget(env) + m = phases.LoadNamedTarget(target) } + // Load bundle and select target. ctx := cmd.Context() diags := bundle.Apply(ctx, b, m) - if err := diags.Error(); err != nil { - return err + if diags.HasError() { + return nil, diags } - cmd.SetContext(bundle.Context(ctx, b)) - return nil + // Configure the workspace profile if the flag has been set. + diags = diags.Extend(configureProfile(cmd, b)) + if diags.HasError() { + return nil, diags + } + + return b, diags } // MustConfigureBundle configures a bundle on the command context. -func MustConfigureBundle(cmd *cobra.Command, args []string) error { - return configureBundle(cmd, args, bundle.MustLoad) +func MustConfigureBundle(cmd *cobra.Command) (*bundle.Bundle, diag.Diagnostics) { + // A bundle may be configured on the context when testing. + // If it is, return it immediately. + b := bundle.GetOrNil(cmd.Context()) + if b != nil { + return b, nil + } + + b, err := bundle.MustLoad(cmd.Context()) + if err != nil { + return nil, diag.FromErr(err) + } + + return configureBundle(cmd, b) } // TryConfigureBundle configures a bundle on the command context // if there is one, but doesn't fail if there isn't one. -func TryConfigureBundle(cmd *cobra.Command, args []string) error { - return configureBundle(cmd, args, bundle.TryLoad) +func TryConfigureBundle(cmd *cobra.Command) (*bundle.Bundle, diag.Diagnostics) { + // A bundle may be configured on the context when testing. + // If it is, return it immediately. + b := bundle.GetOrNil(cmd.Context()) + if b != nil { + return b, nil + } + + b, err := bundle.TryLoad(cmd.Context()) + if err != nil { + return nil, diag.FromErr(err) + } + + // No bundle is fine in this case. + if b == nil { + return nil, nil + } + + return configureBundle(cmd, b) } // targetCompletion executes to autocomplete the argument to the target flag. 
func targetCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - b, err := loadBundle(cmd, args, bundle.MustLoad) + ctx := cmd.Context() + b, err := bundle.MustLoad(ctx) if err != nil { cobra.CompErrorln(err.Error()) return nil, cobra.ShellCompDirectiveError } + // Load bundle but don't select a target (we're completing those). + diags := bundle.Apply(ctx, b, phases.Load()) + if err := diags.Error(); err != nil { + cobra.CompErrorln(err.Error()) + return nil, cobra.ShellCompDirectiveError + } + return maps.Keys(b.Config.Targets), cobra.ShellCompDirectiveDefault } diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index 97412ff69..301884287 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -2,16 +2,17 @@ package root import ( "context" + "fmt" "os" "path/filepath" "runtime" "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/internal/testutil" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func setupDatabricksCfg(t *testing.T) { @@ -37,47 +38,61 @@ func emptyCommand(t *testing.T) *cobra.Command { return cmd } -func setup(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle { +func setupWithHost(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle { setupDatabricksCfg(t) rootPath := t.TempDir() - testutil.Touch(t, rootPath, "databricks.yml") + testutil.Chdir(t, rootPath) - err := configureBundle(cmd, []string{"validate"}, func(_ context.Context) (*bundle.Bundle, error) { - return &bundle.Bundle{ - RootPath: rootPath, - Config: config.Root{ - Bundle: config.Bundle{ - Name: "test", - }, - Workspace: config.Workspace{ - Host: host, - }, - }, - }, nil - }) - assert.NoError(t, err) - return bundle.Get(cmd.Context()) + contents := fmt.Sprintf(` +workspace: + host: %q +`, host) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + require.NoError(t, err) + + b, diags := MustConfigureBundle(cmd) + require.NoError(t, diags.Error()) + return b +} + +func setupWithProfile(t *testing.T, cmd *cobra.Command, profile string) *bundle.Bundle { + setupDatabricksCfg(t) + + rootPath := t.TempDir() + testutil.Chdir(t, rootPath) + + contents := fmt.Sprintf(` +workspace: + profile: %q +`, profile) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + require.NoError(t, err) + + b, diags := MustConfigureBundle(cmd) + require.NoError(t, diags.Error()) + return b } func TestBundleConfigureDefault(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - b := setup(t, cmd, "https://x.com") - assert.NotPanics(t, func() { - b.WorkspaceClient() - }) + b := setupWithHost(t, cmd, "https://x.com") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://x.com", client.Config.Host) } func TestBundleConfigureWithMultipleMatches(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - b := setup(t, cmd, "https://a.com") - assert.Panics(t, func() { - b.WorkspaceClient() - }) + b := setupWithHost(t, cmd, "https://a.com") + + _, err := b.InitializeWorkspaceClient() + assert.ErrorContains(t, err, "multiple profiles matched: PROFILE-1, PROFILE-2") } func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { @@ -85,11 +100,10 @@ func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("NOEXIST") + b 
:= setupWithHost(t, cmd, "https://x.com") - b := setup(t, cmd, "https://x.com") - assert.Panics(t, func() { - b.WorkspaceClient() - }) + _, err := b.InitializeWorkspaceClient() + assert.ErrorContains(t, err, "has no NOEXIST profile configured") } func TestBundleConfigureWithMismatchedProfile(t *testing.T) { @@ -97,11 +111,10 @@ func TestBundleConfigureWithMismatchedProfile(t *testing.T) { cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("PROFILE-1") + b := setupWithHost(t, cmd, "https://x.com") - b := setup(t, cmd, "https://x.com") - assert.PanicsWithError(t, "cannot resolve bundle auth configuration: config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com", func() { - b.WorkspaceClient() - }) + _, err := b.InitializeWorkspaceClient() + assert.ErrorContains(t, err, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com") } func TestBundleConfigureWithCorrectProfile(t *testing.T) { @@ -109,35 +122,97 @@ func TestBundleConfigureWithCorrectProfile(t *testing.T) { cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("PROFILE-1") + b := setupWithHost(t, cmd, "https://a.com") - b := setup(t, cmd, "https://a.com") - assert.NotPanics(t, func() { - b.WorkspaceClient() - }) + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "PROFILE-1", client.Config.Profile) } func TestBundleConfigureWithMismatchedProfileEnvVariable(t *testing.T) { testutil.CleanupEnvironment(t) - t.Setenv("DATABRICKS_CONFIG_PROFILE", "PROFILE-1") + t.Setenv("DATABRICKS_CONFIG_PROFILE", "PROFILE-1") cmd := emptyCommand(t) - b := setup(t, cmd, "https://x.com") - assert.PanicsWithError(t, "cannot resolve bundle auth configuration: config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com", func() { - b.WorkspaceClient() - }) + b := setupWithHost(t, cmd, "https://x.com") + + _, err := b.InitializeWorkspaceClient() + assert.ErrorContains(t, err, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com") } func TestBundleConfigureWithProfileFlagAndEnvVariable(t *testing.T) { testutil.CleanupEnvironment(t) - t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") + t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("PROFILE-1") + b := setupWithHost(t, cmd, "https://a.com") - b := setup(t, cmd, "https://a.com") - assert.NotPanics(t, func() { - b.WorkspaceClient() - }) + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "PROFILE-1", client.Config.Profile) +} + +func TestBundleConfigureProfileDefault(t *testing.T) { + testutil.CleanupEnvironment(t) + + // The profile in the databricks.yml file is used + cmd := emptyCommand(t) + b := setupWithProfile(t, cmd, "PROFILE-1") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "a", client.Config.Token) + assert.Equal(t, "PROFILE-1", client.Config.Profile) +} + +func TestBundleConfigureProfileFlag(t *testing.T) { + testutil.CleanupEnvironment(t) + + // The --profile flag takes precedence over the profile in the databricks.yml file + cmd := emptyCommand(t) + cmd.Flag("profile").Value.Set("PROFILE-2") + b := setupWithProfile(t, cmd, "PROFILE-1") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) 
+ assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "b", client.Config.Token) + assert.Equal(t, "PROFILE-2", client.Config.Profile) +} + +func TestBundleConfigureProfileEnvVariable(t *testing.T) { + testutil.CleanupEnvironment(t) + + // The DATABRICKS_CONFIG_PROFILE environment variable takes precedence over the profile in the databricks.yml file + t.Setenv("DATABRICKS_CONFIG_PROFILE", "PROFILE-2") + cmd := emptyCommand(t) + b := setupWithProfile(t, cmd, "PROFILE-1") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "b", client.Config.Token) + assert.Equal(t, "PROFILE-2", client.Config.Profile) +} + +func TestBundleConfigureProfileFlagAndEnvVariable(t *testing.T) { + testutil.CleanupEnvironment(t) + + // The --profile flag takes precedence over the DATABRICKS_CONFIG_PROFILE environment variable + t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") + cmd := emptyCommand(t) + cmd.Flag("profile").Value.Set("PROFILE-2") + b := setupWithProfile(t, cmd, "PROFILE-1") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "b", client.Config.Token) + assert.Equal(t, "PROFILE-2", client.Config.Profile) } func TestTargetFlagFull(t *testing.T) { @@ -149,7 +224,7 @@ func TestTargetFlagFull(t *testing.T) { err := cmd.ExecuteContext(ctx) assert.NoError(t, err) - assert.Equal(t, getTarget(cmd), "development") + assert.Equal(t, "development", getTarget(cmd)) } func TestTargetFlagShort(t *testing.T) { @@ -161,7 +236,7 @@ func TestTargetFlagShort(t *testing.T) { err := cmd.ExecuteContext(ctx) assert.NoError(t, err) - assert.Equal(t, getTarget(cmd), "production") + assert.Equal(t, "production", getTarget(cmd)) } // TODO: remove when environment flag is fully deprecated @@ -175,5 +250,5 @@ func TestTargetEnvironmentFlag(t *testing.T) { err := cmd.ExecuteContext(ctx) assert.NoError(t, err) - assert.Equal(t, getTarget(cmd), "development") + assert.Equal(t, "development", getTarget(cmd)) } From eea34b25040751862d6b720fa43f9e962970978d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 28 Mar 2024 11:59:03 +0100 Subject: [PATCH 105/286] Return diagnostics from `config.Load` (#1324) ## Changes We no longer need to store load diagnostics on the `config.Root` type itself and instead can return them from the `config.Load` call directly. It is up to the caller of this function to append them to previous diagnostics, if any. Background: previous commits moved configuration loading of the entry point into a mutator, so now all diagnostics naturally flow from applying mutators. This PR depends on #1319. ## Tests Unit and manual validation of the debug statements in the validate command. 
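For reference, each call site now follows roughly this pattern (a minimal sketch of the loader code in the diff below; the variable names mirror the loader mutators and are illustrative only):

```go
// Load configuration from a file and keep whatever diagnostics it produced.
this, diags := config.Load(path)
if diags.HasError() {
	return diags
}

// Merge into the root configuration; any merge error is appended to the
// diagnostics already returned by config.Load, and the combined set is
// returned to the caller.
err := b.Config.Merge(this)
if err != nil {
	diags = diags.Extend(diag.FromErr(err))
}
return diags
```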
--- bundle/config/loader/entry_point.go | 12 +++++----- bundle/config/loader/process_include.go | 14 +++++++----- bundle/config/root.go | 30 ++++++++----------------- bundle/config/root_test.go | 18 +++++++-------- cmd/bundle/validate.go | 2 +- 5 files changed, 34 insertions(+), 42 deletions(-) diff --git a/bundle/config/loader/entry_point.go b/bundle/config/loader/entry_point.go index 24ba2f068..2c73a5825 100644 --- a/bundle/config/loader/entry_point.go +++ b/bundle/config/loader/entry_point.go @@ -24,11 +24,13 @@ func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics if err != nil { return diag.FromErr(err) } - this, err := config.Load(path) - if err != nil { - return diag.FromErr(err) + this, diags := config.Load(path) + if diags.HasError() { + return diags } - // TODO: Return actual warnings. err = b.Config.Merge(this) - return diag.FromErr(err) + if err != nil { + diags = diags.Extend(diag.FromErr(err)) + } + return diags } diff --git a/bundle/config/loader/process_include.go b/bundle/config/loader/process_include.go index 328f4eacf..7cf9a17d7 100644 --- a/bundle/config/loader/process_include.go +++ b/bundle/config/loader/process_include.go @@ -27,11 +27,13 @@ func (m *processInclude) Name() string { } func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { - this, err := config.Load(m.fullPath) - if err != nil { - return diag.FromErr(err) + this, diags := config.Load(m.fullPath) + if diags.HasError() { + return diags } - // TODO: Return actual warnings. - err = b.Config.Merge(this) - return diag.FromErr(err) + err := b.Config.Merge(this) + if err != nil { + diags = diags.Extend(diag.FromErr(err)) + } + return diags } diff --git a/bundle/config/root.go b/bundle/config/root.go index 0e54c04ce..18b548d64 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -20,7 +20,6 @@ import ( type Root struct { value dyn.Value - diags diag.Diagnostics depth int // Contains user defined variables @@ -69,10 +68,10 @@ type Root struct { } // Load loads the bundle configuration file at the specified path. -func Load(path string) (*Root, error) { +func Load(path string) (*Root, diag.Diagnostics) { raw, err := os.ReadFile(path) if err != nil { - return nil, err + return nil, diag.FromErr(err) } r := Root{} @@ -80,31 +79,29 @@ func Load(path string) (*Root, error) { // Load configuration tree from YAML. v, err := yamlloader.LoadYAML(path, bytes.NewBuffer(raw)) if err != nil { - return nil, fmt.Errorf("failed to load %s: %w", path, err) + return nil, diag.Errorf("failed to load %s: %v", path, err) } // Rewrite configuration tree where necessary. v, err = rewriteShorthands(v) if err != nil { - return nil, fmt.Errorf("failed to rewrite %s: %w", path, err) + return nil, diag.Errorf("failed to rewrite %s: %v", path, err) } // Normalize dynamic configuration tree according to configuration type. v, diags := convert.Normalize(r, v) - // Keep track of diagnostics (warnings and errors in the schema). - // We delay acting on diagnostics until we have loaded all - // configuration files and merged them together. - r.diags = diags - // Convert normalized configuration tree to typed configuration. 
err = r.updateWithDynamicValue(v) if err != nil { - return nil, fmt.Errorf("failed to load %s: %w", path, err) + return nil, diag.Errorf("failed to load %s: %v", path, err) } _, err = r.Resources.VerifyUniqueResourceIdentifiers() - return &r, err + if err != nil { + diags = diags.Extend(diag.FromErr(err)) + } + return &r, diags } func (r *Root) initializeDynamicValue() error { @@ -126,11 +123,9 @@ func (r *Root) initializeDynamicValue() error { func (r *Root) updateWithDynamicValue(nv dyn.Value) error { // Hack: restore state; it may be cleared by [ToTyped] if // the configuration equals nil (happens in tests). - diags := r.diags depth := r.depth defer func() { - r.diags = diags r.depth = depth }() @@ -224,10 +219,6 @@ func (r *Root) MarkMutatorExit(ctx context.Context) error { return nil } -func (r *Root) Diagnostics() diag.Diagnostics { - return r.diags -} - // SetConfigFilePath configures the path that its configuration // was loaded from in configuration leafs that require it. func (r *Root) ConfigureConfigFilePath() { @@ -261,9 +252,6 @@ func (r *Root) InitializeVariables(vars []string) error { } func (r *Root) Merge(other *Root) error { - // Merge diagnostics. - r.diags = append(r.diags, other.diags...) - // Check for safe merge, protecting against duplicate resource identifiers err := r.Resources.VerifySafeMerge(&other.Resources) if err != nil { diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index 3b25fb1f8..b56768848 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -25,24 +25,24 @@ func TestRootMarshalUnmarshal(t *testing.T) { } func TestRootLoad(t *testing.T) { - root, err := Load("../tests/basic/databricks.yml") - require.NoError(t, err) + root, diags := Load("../tests/basic/databricks.yml") + require.NoError(t, diags.Error()) assert.Equal(t, "basic", root.Bundle.Name) } func TestDuplicateIdOnLoadReturnsError(t *testing.T) { - _, err := Load("./testdata/duplicate_resource_names_in_root/databricks.yml") - assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)") + _, diags := Load("./testdata/duplicate_resource_names_in_root/databricks.yml") + assert.ErrorContains(t, diags.Error(), "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)") } func TestDuplicateIdOnMergeReturnsError(t *testing.T) { - root, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml") - require.NoError(t, err) + root, diags := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml") + require.NoError(t, diags.Error()) - other, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml") - require.NoError(t, err) + other, diags := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml") + require.NoError(t, diags.Error()) - err = root.Merge(other) + err := root.Merge(other) assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml, pipeline at ./testdata/duplicate_resource_name_in_subconfiguration/resources.yml)") } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 57bf6f7b9..e625539b4 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -32,7 +32,7 @@ func newValidateCommand() *cobra.Command { // Until we 
change up the output of this command to be a text representation, // we'll just output all diagnostics as debug logs. - for _, diag := range b.Config.Diagnostics() { + for _, diag := range diags { log.Debugf(cmd.Context(), "[%s]: %s", diag.Location, diag.Summary) } From cddc5f97f862805d885ea3b0d1d497d51e0a59fa Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:55:36 +0530 Subject: [PATCH 106/286] Fix the generated DABs JSON schema (#1322) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR fixes bundle schema being broken because `for_each_task: null` was set in the generated schema. This is not valid according to the JSON schema specification and thus the Red Hat YAML VSCode extension was failing to parse the YAML configuration. This PR fixes: https://github.com/databricks/cli/issues/1312 ## Tests The fix itself was tested manually. I asserted that the autocompletion works now. This was mistakenly overlooked the first time around when the regression was introduced in https://github.com/databricks/cli/pull/1204 because the YAML extension provides best-effort autocomplete suggestions even if the JSON schema fails to load. To prevent future regressions we also add a test to assert that the JSON schema generated itself is a valid JSON schema object. This is done via using the `ajv-cli` to validate the schema. This package is also used by the Red Hat YAML extension and thus provides a high fidelity check for ensuring the JSON schema is valid. Before, with the old schema: ``` shreyas.goenka@THW32HFW6T cli-versions % ajv validate -s proj/schema-216.json -d ../bundle-playground-3/databricks.yml schema proj/schema-216.json is invalid error: schema is invalid: data/properties/resources/properties/jobs/additionalProperties/properties/tasks/items/properties/for_each_task must be object,boolean, data/properties/resources/properties/jobs/additionalProperties/properties/tasks/items must be array, data/properties/resources/properties/jobs/additionalProperties/properties/tasks/items must match a schema in anyOf ``` After, with the new schema: ``` shreyas.goenka@THW32HFW6T cli-versions % ajv validate -s proj/schema-dev.json -d ../bundle-playground-3/databricks.yml ../bundle-playground-3/databricks.yml valid ``` After, autocomplete suggestions: Screenshot 2024-03-27 at 6 35 57 PM --- .github/workflows/push.yml | 26 ++++++++++++++++++++++++++ bundle/schema/schema.go | 4 +++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 18ba54a37..244bdeee5 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -89,3 +89,29 @@ jobs: run: | # Exit with status code 1 if there are differences (i.e. unformatted files) git diff --exit-code + + validate-bundle-schema: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: 1.21.x + + # Github repo: https://github.com/ajv-validator/ajv-cli + - name: Install ajv-cli + run: npm install -g ajv-cli@5.0.0 + + # Assert that the generated bundle schema is a valid JSON schema by using + # ajv-cli to validate it against a sample configuration file. + # By default the ajv-cli runs in strict mode which will fail if the schema + # itself is not valid. Strict mode is more strict than the JSON schema + # specification. 
See for details: https://ajv.js.org/options.html#strict-mode-options + - name: Validate bundle schema + run: | + go run main.go bundle schema > schema.json + ajv -s schema.json -d ./bundle/tests/basic/databricks.yml diff --git a/bundle/schema/schema.go b/bundle/schema/schema.go index 7153f38f6..b37f72d9b 100644 --- a/bundle/schema/schema.go +++ b/bundle/schema/schema.go @@ -95,7 +95,9 @@ func safeToSchema(golangType reflect.Type, docs *Docs, traceId string, tracker * // HACK to unblock CLI release (13th Feb 2024). This is temporary until proper // support for recursive types is added to the schema generator. PR: https://github.com/databricks/cli/pull/1204 if traceId == "for_each_task" { - return nil, nil + return &jsonschema.Schema{ + Type: jsonschema.ObjectType, + }, nil } // WE ERROR OUT IF THERE ARE CYCLES IN THE JSON SCHEMA From dca81a40f44b6d05dd612e3b4df4df733b01c1a7 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 2 Apr 2024 14:17:29 +0200 Subject: [PATCH 107/286] Return warning for nil primitive types during normalization (#1329) ## Changes It's not necessary to error out if a configuration field is present but not set. For example, the following would error out, but after this change only produces a warning: ```yaml workspace: # This is a string field, but if not specified, it ends up being a null. host: ``` ## Tests Updated the unit tests to match the new behavior. --------- Co-authored-by: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> --- libs/dyn/convert/normalize.go | 20 ++++++++++++++++++++ libs/dyn/convert/normalize_test.go | 16 ++++++++-------- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index f18b27fd2..ff4d94b88 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -61,6 +61,14 @@ func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value, seen [] return dyn.InvalidValue, diag.Errorf("unsupported type: %s", typ.Kind()) } +func nullWarning(expected dyn.Kind, src dyn.Value) diag.Diagnostic { + return diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("expected a %s value, found null", expected), + Location: src.Location(), + } +} + func typeMismatch(expected dyn.Kind, src dyn.Value) diag.Diagnostic { return diag.Diagnostic{ Severity: diag.Error, @@ -229,6 +237,9 @@ func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value) (dyn. out = strconv.FormatInt(src.MustInt(), 10) case dyn.KindFloat: out = strconv.FormatFloat(src.MustFloat(), 'f', -1, 64) + case dyn.KindNil: + // Return a warning if the field is present but has a null value. + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindString, src)) default: return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindString, src)) } @@ -259,6 +270,9 @@ func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Va // Cannot interpret as a boolean. return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src)) } + case dyn.KindNil: + // Return a warning if the field is present but has a null value. + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindBool, src)) default: return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src)) } @@ -288,6 +302,9 @@ func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Val Location: src.Location(), }) } + case dyn.KindNil: + // Return a warning if the field is present but has a null value. 
+ return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindInt, src)) default: return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindInt, src)) } @@ -317,6 +334,9 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.V Location: src.Location(), }) } + case dyn.KindNil: + // Return a warning if the field is present but has a null value. + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindFloat, src)) default: return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindFloat, src)) } diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 78c487d3f..66e781bb8 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -407,8 +407,8 @@ func TestNormalizeStringNil(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, - Summary: `expected string, found nil`, + Severity: diag.Warning, + Summary: `expected a string value, found null`, Location: vin.Location(), }, err[0]) } @@ -463,8 +463,8 @@ func TestNormalizeBoolNil(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, - Summary: `expected bool, found nil`, + Severity: diag.Warning, + Summary: `expected a bool value, found null`, Location: vin.Location(), }, err[0]) } @@ -536,8 +536,8 @@ func TestNormalizeIntNil(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, - Summary: `expected int, found nil`, + Severity: diag.Warning, + Summary: `expected a int value, found null`, Location: vin.Location(), }, err[0]) } @@ -596,8 +596,8 @@ func TestNormalizeFloatNil(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, - Summary: `expected float, found nil`, + Severity: diag.Warning, + Summary: `expected a float value, found null`, Location: vin.Location(), }, err[0]) } From 56e393c7435cf4392846a2cf3b59e33426dcece3 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 2 Apr 2024 14:55:21 +0200 Subject: [PATCH 108/286] Allow specifying CLI version constraints required to run the bundle (#1320) ## Changes Allow specifying CLI version constraints required to run the bundle Example of configuration: #### only allow specific version ``` bundle: name: my-bundle databricks_cli_version: "0.210.0" ``` #### allow all patch releases ``` bundle: name: my-bundle databricks_cli_version: "0.210.*" ``` #### constrain minimum version ``` bundle: name: my-bundle databricks_cli_version: ">= 0.210.0" ``` #### constrain range ``` bundle: name: my-bundle databricks_cli_version: ">= 0.210.0, <= 1.0.0" ``` For other examples see: https://github.com/Masterminds/semver?tab=readme-ov-file#checking-version-constraints Example error ``` sh-3.2$ databricks bundle validate Error: Databricks CLI version constraint not satisfied. 
Required: >= 1.0.0, current: 0.216.0 ``` ## Tests Added unit test cover all possible configuration permutations --------- Co-authored-by: Lennart Kats (databricks) --- NOTICE | 4 + bundle/config/bundle.go | 3 + bundle/config/mutator/mutator.go | 3 + bundle/config/mutator/verify_cli_version.go | 82 +++++++++ .../config/mutator/verify_cli_version_test.go | 174 ++++++++++++++++++ go.mod | 4 +- go.sum | 2 + internal/build/variables.go | 6 + 8 files changed, 276 insertions(+), 2 deletions(-) create mode 100644 bundle/config/mutator/verify_cli_version.go create mode 100644 bundle/config/mutator/verify_cli_version_test.go diff --git a/NOTICE b/NOTICE index fdc2a88cf..e356d028e 100644 --- a/NOTICE +++ b/NOTICE @@ -73,6 +73,10 @@ ghodss/yaml - https://github.com/ghodss/yaml Copyright (c) 2014 Sam Ghods License - https://github.com/ghodss/yaml/blob/master/LICENSE +Masterminds/semver - https://github.com/Masterminds/semver +Copyright (C) 2014-2019, Matt Butcher and Matt Farina +License - https://github.com/Masterminds/semver/blob/master/LICENSE.txt + mattn/go-isatty - https://github.com/mattn/go-isatty Copyright (c) Yasuhiro MATSUMOTO https://github.com/mattn/go-isatty/blob/master/LICENSE diff --git a/bundle/config/bundle.go b/bundle/config/bundle.go index 6f991e562..78648dfd7 100644 --- a/bundle/config/bundle.go +++ b/bundle/config/bundle.go @@ -43,4 +43,7 @@ type Bundle struct { // Deployment section specifies deployment related configuration for bundle Deployment Deployment `json:"deployment,omitempty"` + + // Databricks CLI version constraints required to run the bundle. + DatabricksCliVersion string `json:"databricks_cli_version,omitempty"` } diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index 99b7e9ac9..fda118271 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -12,6 +12,9 @@ func DefaultMutators() []bundle.Mutator { loader.EntryPoint(), loader.ProcessRootIncludes(), + // Verify that the CLI version is within the specified range. + VerifyCliVersion(), + // Execute preinit script after loading all configuration files. scripts.Execute(config.ScriptPreInit), EnvironmentsToTargets(), diff --git a/bundle/config/mutator/verify_cli_version.go b/bundle/config/mutator/verify_cli_version.go new file mode 100644 index 000000000..9c32fcc9d --- /dev/null +++ b/bundle/config/mutator/verify_cli_version.go @@ -0,0 +1,82 @@ +package mutator + +import ( + "context" + "fmt" + "regexp" + + semver "github.com/Masterminds/semver/v3" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/libs/diag" +) + +func VerifyCliVersion() bundle.Mutator { + return &verifyCliVersion{} +} + +type verifyCliVersion struct { +} + +func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + // No constraints specified, skip the check. + if b.Config.Bundle.DatabricksCliVersion == "" { + return nil + } + + constraint := b.Config.Bundle.DatabricksCliVersion + if err := validateConstraintSyntax(constraint); err != nil { + return diag.FromErr(err) + } + currentVersion := build.GetInfo().Version + c, err := semver.NewConstraint(constraint) + if err != nil { + return diag.FromErr(err) + } + + version, err := semver.NewVersion(currentVersion) + if err != nil { + return diag.Errorf("parsing CLI version %q failed", currentVersion) + } + + if !c.Check(version) { + return diag.Errorf("Databricks CLI version constraint not satisfied. 
Required: %s, current: %s", constraint, currentVersion) + } + + return nil +} + +func (v *verifyCliVersion) Name() string { + return "VerifyCliVersion" +} + +// validateConstraintSyntax validates the syntax of the version constraint. +func validateConstraintSyntax(constraint string) error { + r := generateConstraintSyntaxRegexp() + if !r.MatchString(constraint) { + return fmt.Errorf("invalid version constraint %q specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)", constraint) + } + + return nil +} + +// Generate regexp which matches the supported version constraint syntax. +func generateConstraintSyntaxRegexp() *regexp.Regexp { + // We intentionally only support the format supported by requirements.txt: + // 1. 0.0.0 + // 2. >= 0.0.0 + // 3. <= 0.0.0 + // 4. > 0.0.0 + // 5. < 0.0.0 + // 6. != 0.0.0 + // 7. 0.0.* + // 8. 0.* + // 9. >= 0.0.0, <= 1.0.0 + // 10. 0.0.0-0 + // 11. 0.0.0-beta + // 12. >= 0.0.0-0, <= 1.0.0-0 + + matchVersion := `(\d+\.\d+\.\d+(\-\w+)?|\d+\.\d+.\*|\d+\.\*)` + matchOperators := `(>=|<=|>|<|!=)?` + return regexp.MustCompile(fmt.Sprintf(`^%s ?%s(, %s %s)?$`, matchOperators, matchVersion, matchOperators, matchVersion)) +} diff --git a/bundle/config/mutator/verify_cli_version_test.go b/bundle/config/mutator/verify_cli_version_test.go new file mode 100644 index 000000000..24f656745 --- /dev/null +++ b/bundle/config/mutator/verify_cli_version_test.go @@ -0,0 +1,174 @@ +package mutator + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/build" + "github.com/stretchr/testify/require" +) + +type testCase struct { + currentVersion string + constraint string + expectedError string +} + +func TestVerifyCliVersion(t *testing.T) { + testCases := []testCase{ + { + currentVersion: "0.0.1", + }, + { + currentVersion: "0.0.1", + constraint: "0.100.0", + expectedError: "Databricks CLI version constraint not satisfied. Required: 0.100.0, current: 0.0.1", + }, + { + currentVersion: "0.0.1", + constraint: ">= 0.100.0", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, current: 0.0.1", + }, + { + currentVersion: "0.100.0", + constraint: "0.100.0", + }, + { + currentVersion: "0.100.1", + constraint: "0.100.0", + expectedError: "Databricks CLI version constraint not satisfied. Required: 0.100.0, current: 0.100.1", + }, + { + currentVersion: "0.100.1", + constraint: ">= 0.100.0", + }, + { + currentVersion: "0.100.0", + constraint: "<= 1.0.0", + }, + { + currentVersion: "1.0.0", + constraint: "<= 1.0.0", + }, + { + currentVersion: "1.0.0", + constraint: "<= 0.100.0", + expectedError: "Databricks CLI version constraint not satisfied. Required: <= 0.100.0, current: 1.0.0", + }, + { + currentVersion: "0.99.0", + constraint: ">= 0.100.0, <= 0.100.2", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, <= 0.100.2, current: 0.99.0", + }, + { + currentVersion: "0.100.0", + constraint: ">= 0.100.0, <= 0.100.2", + }, + { + currentVersion: "0.100.1", + constraint: ">= 0.100.0, <= 0.100.2", + }, + { + currentVersion: "0.100.2", + constraint: ">= 0.100.0, <= 0.100.2", + }, + { + currentVersion: "0.101.0", + constraint: ">= 0.100.0, <= 0.100.2", + expectedError: "Databricks CLI version constraint not satisfied. 
Required: >= 0.100.0, <= 0.100.2, current: 0.101.0", + }, + { + currentVersion: "0.100.0-beta", + constraint: ">= 0.100.0, <= 0.100.2", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, <= 0.100.2, current: 0.100.0-beta", + }, + { + currentVersion: "0.100.0-beta", + constraint: ">= 0.100.0-0, <= 0.100.2-0", + }, + { + currentVersion: "0.100.1-beta", + constraint: ">= 0.100.0-0, <= 0.100.2-0", + }, + { + currentVersion: "0.100.3-beta", + constraint: ">= 0.100.0, <= 0.100.2", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, <= 0.100.2, current: 0.100.3-beta", + }, + { + currentVersion: "0.100.123", + constraint: "0.100.*", + }, + { + currentVersion: "0.100.123", + constraint: "^0.100", + expectedError: "invalid version constraint \"^0.100\" specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)", + }, + } + + t.Cleanup(func() { + // Reset the build version to the default version + // so that it doesn't affect other tests + // It doesn't really matter what we configure this to when testing + // as long as it is a valid semver version. + build.SetBuildVersion(build.DefaultSemver) + }) + + for i, tc := range testCases { + t.Run(fmt.Sprintf("testcase #%d", i), func(t *testing.T) { + build.SetBuildVersion(tc.currentVersion) + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + DatabricksCliVersion: tc.constraint, + }, + }, + } + diags := bundle.Apply(context.Background(), b, VerifyCliVersion()) + if tc.expectedError != "" { + require.NotEmpty(t, diags) + require.Equal(t, tc.expectedError, diags.Error().Error()) + } else { + require.Empty(t, diags) + } + }) + } +} + +func TestValidateConstraint(t *testing.T) { + testCases := []struct { + constraint string + expected bool + }{ + {"0.0.0", true}, + {">= 0.0.0", true}, + {"<= 0.0.0", true}, + {"> 0.0.0", true}, + {"< 0.0.0", true}, + {"!= 0.0.0", true}, + {"0.0.*", true}, + {"0.*", true}, + {">= 0.0.0, <= 1.0.0", true}, + {">= 0.0.0-0, <= 1.0.0-0", true}, + {"0.0.0-0", true}, + {"0.0.0-beta", true}, + {"^0.0.0", false}, + {"~0.0.0", false}, + {"0.0.0 1.0.0", false}, + {"> 0.0.0 < 1.0.0", false}, + } + + for _, tc := range testCases { + t.Run(tc.constraint, func(t *testing.T) { + err := validateConstraintSyntax(tc.constraint) + if tc.expected { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} diff --git a/go.mod b/go.mod index d9e6c24f0..88fb8faeb 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/databricks/cli go 1.21 require ( + github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.0 // Apache 2.0 github.com/databricks/databricks-sdk-go v0.36.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT @@ -27,10 +28,9 @@ require ( golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 + gopkg.in/yaml.v3 v3.0.1 ) -require gopkg.in/yaml.v3 v3.0.1 - require ( cloud.google.com/go/compute v1.23.4 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect diff --git a/go.sum b/go.sum index a4a6eb40b..fc978c841 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,8 @@ cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2Aawl dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.2.1 
h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= diff --git a/internal/build/variables.go b/internal/build/variables.go index 096657c6e..197dee9c3 100644 --- a/internal/build/variables.go +++ b/internal/build/variables.go @@ -16,3 +16,9 @@ var buildPatch string = "0" var buildPrerelease string = "" var buildIsSnapshot string = "false" var buildTimestamp string = "0" + +// This function is used to set the build version for testing purposes. +func SetBuildVersion(version string) { + buildVersion = version + info.Version = version +} From 079c416f8d0e4600901c3704304a52c140a862eb Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Tue, 2 Apr 2024 14:56:27 +0200 Subject: [PATCH 109/286] Add `bundle debug terraform` command (#1294) - Add `bundle debug terraform` command. It prints versions of the Terraform and the Databricks Terraform provider. In the text mode it also explains how to setup the CLI in environments with restricted internet access. - Use `DATABRICKS_TF_EXEC_PATH` env var to point Databricks CLI to the Terraform binary. The CLI only uses it if `DATABRICKS_TF_VERSION` matches the currently used terraform version. - Use `DATABRICKS_TF_CLI_CONFIG_FILE` env var to point Terraform CLI config that points to the filesystem mirror for the Databricks provider. The CLI only uses it if `DATABRICKS_TF_PROVIDER_VERSION` matches the currently used provider version. Relevant PR on the VSCode extension side: https://github.com/databricks/databricks-vscode/pull/1147 Example output of the `databricks bundle debug terraform`: ``` Terraform version: 1.5.5 Terraform URL: https://releases.hashicorp.com/terraform/1.5.5 Databricks Terraform Provider version: 1.38.0 Databricks Terraform Provider URL: https://github.com/databricks/terraform-provider-databricks/releases/tag/v1.38.0 Databricks CLI downloads its Terraform dependencies automatically. If you run the CLI in an air-gapped environment, you can download the dependencies manually and set these environment variables: DATABRICKS_TF_VERSION=1.5.5 DATABRICKS_TF_EXEC_PATH=/path/to/terraform/binary DATABRICKS_TF_PROVIDER_VERSION=1.38.0 DATABRICKS_TF_CLI_CONFIG_FILE=/path/to/terraform/cli/config.tfrc Here is an example *.tfrc configuration file: disable_checkpoint = true provider_installation { filesystem_mirror { path = "/path/to/a/folder/with/databricks/terraform/provider" } } The filesystem mirror path should point to the folder with the Databricks Terraform Provider. 
The folder should have this structure: /registry.terraform.io/databricks/databricks/terraform-provider-databricks_1.38.0_ARCH.zip For more information about filesystem mirrors, see the Terraform documentation: https://developer.hashicorp.com/terraform/cli/config/config-file#filesystem_mirror ``` --------- Co-authored-by: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> --- bundle/deploy/terraform/init.go | 58 ++++++++- bundle/deploy/terraform/init_test.go | 123 ++++++++++++++++++ bundle/deploy/terraform/pkg.go | 30 +++++ .../tf/codegen/templates/root.go.tmpl | 8 +- bundle/internal/tf/schema/root.go | 8 +- cmd/bundle/bundle.go | 1 + cmd/bundle/debug.go | 18 +++ cmd/bundle/debug/terraform.go | 78 +++++++++++ 8 files changed, 317 insertions(+), 7 deletions(-) create mode 100644 cmd/bundle/debug.go create mode 100644 cmd/bundle/debug/terraform.go diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index ca1fc8caf..9f4235310 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -12,10 +12,10 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/internal/tf/schema" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/log" - "github.com/hashicorp/go-version" "github.com/hashicorp/hc-install/product" "github.com/hashicorp/hc-install/releases" "github.com/hashicorp/terraform-exec/tfexec" @@ -40,6 +40,17 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con return tf.ExecPath, nil } + // Load exec path from the environment if it matches the currently used version. + envExecPath, err := getEnvVarWithMatchingVersion(ctx, TerraformExecPathEnv, TerraformVersionEnv, TerraformVersion.String()) + if err != nil { + return "", err + } + if envExecPath != "" { + tf.ExecPath = envExecPath + log.Debugf(ctx, "Using Terraform from %s at %s", TerraformExecPathEnv, tf.ExecPath) + return tf.ExecPath, nil + } + binDir, err := b.CacheDir(context.Background(), "bin") if err != nil { return "", err @@ -60,7 +71,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con // Download Terraform to private bin directory. installer := &releases.ExactVersion{ Product: product.Terraform, - Version: version.Must(version.NewVersion("1.5.5")), + Version: TerraformVersion, InstallDir: binDir, Timeout: 1 * time.Minute, } @@ -98,14 +109,55 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error { } // Include $TF_CLI_CONFIG_FILE to override terraform provider in development. - configFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE") + // See: https://developer.hashicorp.com/terraform/cli/config/config-file#explicit-installation-method-configuration + devConfigFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE") if ok { + environ["TF_CLI_CONFIG_FILE"] = devConfigFile + } + + // Map $DATABRICKS_TF_CLI_CONFIG_FILE to $TF_CLI_CONFIG_FILE + // VSCode extension provides a file with the "provider_installation.filesystem_mirror" configuration. + // We only use it if the provider version matches the currently used version, + // otherwise terraform will fail to download the right version (even with unrestricted internet access). 
+ configFile, err := getEnvVarWithMatchingVersion(ctx, TerraformCliConfigPathEnv, TerraformProviderVersionEnv, schema.ProviderVersion) + if err != nil { + return err + } + if configFile != "" { + log.Debugf(ctx, "Using Terraform CLI config from %s at %s", TerraformCliConfigPathEnv, configFile) environ["TF_CLI_CONFIG_FILE"] = configFile } return nil } +// Example: this function will return a value of TF_EXEC_PATH only if the path exists and if TF_VERSION matches the TerraformVersion. +// This function is used for env vars set by the Databricks VSCode extension. The variables are intended to be used by the CLI +// bundled with the Databricks VSCode extension, but users can use different CLI versions in the VSCode terminals, in which case we want to ignore +// the variables if that CLI uses different versions of the dependencies. +func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versionVarName string, currentVersion string) (string, error) { + envValue := env.Get(ctx, envVarName) + versionValue := env.Get(ctx, versionVarName) + if envValue == "" || versionValue == "" { + log.Debugf(ctx, "%s and %s aren't defined", envVarName, versionVarName) + return "", nil + } + if versionValue != currentVersion { + log.Debugf(ctx, "%s as %s does not match the current version %s, ignoring %s", versionVarName, versionValue, currentVersion, envVarName) + return "", nil + } + _, err := os.Stat(envValue) + if err != nil { + if os.IsNotExist(err) { + log.Debugf(ctx, "%s at %s does not exist, ignoring %s", envVarName, envValue, versionVarName) + return "", nil + } else { + return "", err + } + } + return envValue, nil +} + // This function sets temp dir location for terraform to use. If user does not // specify anything here, we fall back to a `tmp` directory in the bundle's cache // directory diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 29bd80a3e..ece897193 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -4,12 +4,16 @@ import ( "context" "os" "os/exec" + "path/filepath" "runtime" "strings" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/env" + "github.com/hashicorp/hc-install/product" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -269,3 +273,122 @@ func TestSetUserProfileFromInheritEnvVars(t *testing.T) { assert.Contains(t, env, "USERPROFILE") assert.Equal(t, env["USERPROFILE"], "c:\\foo\\c") } + +func TestInheritEnvVarsWithAbsentTFConfigFile(t *testing.T) { + ctx := context.Background() + envMap := map[string]string{} + ctx = env.Set(ctx, "DATABRICKS_TF_PROVIDER_VERSION", schema.ProviderVersion) + ctx = env.Set(ctx, "DATABRICKS_TF_CLI_CONFIG_FILE", "/tmp/config.tfrc") + err := inheritEnvVars(ctx, envMap) + require.NoError(t, err) + require.NotContains(t, envMap, "TF_CLI_CONFIG_FILE") +} + +func TestInheritEnvVarsWithWrongTFProviderVersion(t *testing.T) { + ctx := context.Background() + envMap := map[string]string{} + configFile := createTempFile(t, t.TempDir(), "config.tfrc", false) + ctx = env.Set(ctx, "DATABRICKS_TF_PROVIDER_VERSION", "wrong") + ctx = env.Set(ctx, "DATABRICKS_TF_CLI_CONFIG_FILE", configFile) + err := inheritEnvVars(ctx, envMap) + require.NoError(t, err) + require.NotContains(t, envMap, "TF_CLI_CONFIG_FILE") +} + +func TestInheritEnvVarsWithCorrectTFCLIConfigFile(t *testing.T) { + ctx := 
context.Background() + envMap := map[string]string{} + configFile := createTempFile(t, t.TempDir(), "config.tfrc", false) + ctx = env.Set(ctx, "DATABRICKS_TF_PROVIDER_VERSION", schema.ProviderVersion) + ctx = env.Set(ctx, "DATABRICKS_TF_CLI_CONFIG_FILE", configFile) + err := inheritEnvVars(ctx, envMap) + require.NoError(t, err) + require.Contains(t, envMap, "TF_CLI_CONFIG_FILE") + require.Equal(t, configFile, envMap["TF_CLI_CONFIG_FILE"]) +} + +func TestFindExecPathFromEnvironmentWithWrongVersion(t *testing.T) { + ctx := context.Background() + m := &initialize{} + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{}, + }, + }, + } + // Create a pre-existing terraform bin to avoid downloading it + cacheDir, _ := b.CacheDir(ctx, "bin") + existingExecPath := createTempFile(t, cacheDir, product.Terraform.BinaryName(), true) + // Create a new terraform binary and expose it through env vars + tmpBinPath := createTempFile(t, t.TempDir(), "terraform-bin", true) + ctx = env.Set(ctx, "DATABRICKS_TF_VERSION", "1.2.3") + ctx = env.Set(ctx, "DATABRICKS_TF_EXEC_PATH", tmpBinPath) + _, err := m.findExecPath(ctx, b, b.Config.Bundle.Terraform) + require.NoError(t, err) + require.Equal(t, existingExecPath, b.Config.Bundle.Terraform.ExecPath) +} + +func TestFindExecPathFromEnvironmentWithCorrectVersionAndNoBinary(t *testing.T) { + ctx := context.Background() + m := &initialize{} + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{}, + }, + }, + } + // Create a pre-existing terraform bin to avoid downloading it + cacheDir, _ := b.CacheDir(ctx, "bin") + existingExecPath := createTempFile(t, cacheDir, product.Terraform.BinaryName(), true) + + ctx = env.Set(ctx, "DATABRICKS_TF_VERSION", TerraformVersion.String()) + ctx = env.Set(ctx, "DATABRICKS_TF_EXEC_PATH", "/tmp/terraform") + _, err := m.findExecPath(ctx, b, b.Config.Bundle.Terraform) + require.NoError(t, err) + require.Equal(t, existingExecPath, b.Config.Bundle.Terraform.ExecPath) +} + +func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) { + ctx := context.Background() + m := &initialize{} + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{}, + }, + }, + } + // Create a pre-existing terraform bin to avoid downloading it + cacheDir, _ := b.CacheDir(ctx, "bin") + createTempFile(t, cacheDir, product.Terraform.BinaryName(), true) + // Create a new terraform binary and expose it through env vars + tmpBinPath := createTempFile(t, t.TempDir(), "terraform-bin", true) + ctx = env.Set(ctx, "DATABRICKS_TF_VERSION", TerraformVersion.String()) + ctx = env.Set(ctx, "DATABRICKS_TF_EXEC_PATH", tmpBinPath) + _, err := m.findExecPath(ctx, b, b.Config.Bundle.Terraform) + require.NoError(t, err) + require.Equal(t, tmpBinPath, b.Config.Bundle.Terraform.ExecPath) +} + +func createTempFile(t *testing.T, dest string, name string, executable bool) string { + binPath := filepath.Join(dest, name) + f, err := os.Create(binPath) + require.NoError(t, err) + defer func() { + err = f.Close() + require.NoError(t, err) + }() + if executable { + err = f.Chmod(0777) + require.NoError(t, err) + } + return binPath +} diff --git a/bundle/deploy/terraform/pkg.go b/bundle/deploy/terraform/pkg.go index 2d9293d1b..911583f29 100644 --- a/bundle/deploy/terraform/pkg.go +++ 
b/bundle/deploy/terraform/pkg.go @@ -1,4 +1,34 @@ package terraform +import ( + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/hashicorp/go-version" +) + const TerraformStateFileName = "terraform.tfstate" const TerraformConfigFileName = "bundle.tf.json" + +// Users can provide their own terraform binary and databricks terraform provider by setting the following environment variables. +// This allows users to use the CLI in an air-gapped environments. See the `debug terraform` command. +const TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH" +const TerraformVersionEnv = "DATABRICKS_TF_VERSION" +const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" +const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" + +var TerraformVersion = version.Must(version.NewVersion("1.5.5")) + +type TerraformMetadata struct { + Version string `json:"version"` + ProviderHost string `json:"providerHost"` + ProviderSource string `json:"providerSource"` + ProviderVersion string `json:"providerVersion"` +} + +func NewTerraformMetadata() *TerraformMetadata { + return &TerraformMetadata{ + Version: TerraformVersion.String(), + ProviderHost: schema.ProviderHost, + ProviderSource: schema.ProviderSource, + ProviderVersion: schema.ProviderVersion, + } +} diff --git a/bundle/internal/tf/codegen/templates/root.go.tmpl b/bundle/internal/tf/codegen/templates/root.go.tmpl index 57fa71299..e03e978f0 100644 --- a/bundle/internal/tf/codegen/templates/root.go.tmpl +++ b/bundle/internal/tf/codegen/templates/root.go.tmpl @@ -19,13 +19,17 @@ type Root struct { Resource *Resources `json:"resource,omitempty"` } +const ProviderHost = "registry.terraform.io" +const ProviderSource = "databricks/databricks" +const ProviderVersion = "{{ .ProviderVersion }}" + func NewRoot() *Root { return &Root{ Terraform: map[string]interface{}{ "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ - "source": "databricks/databricks", - "version": "{{ .ProviderVersion }}", + "source": ProviderSource, + "version": ProviderVersion, }, }, }, diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 118e2857d..395326329 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -19,13 +19,17 @@ type Root struct { Resource *Resources `json:"resource,omitempty"` } +const ProviderHost = "registry.terraform.io" +const ProviderSource = "databricks/databricks" +const ProviderVersion = "1.38.0" + func NewRoot() *Root { return &Root{ Terraform: map[string]interface{}{ "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ - "source": "databricks/databricks", - "version": "1.38.0", + "source": ProviderSource, + "version": ProviderVersion, }, }, }, diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go index 43a9ef680..1db60d585 100644 --- a/cmd/bundle/bundle.go +++ b/cmd/bundle/bundle.go @@ -25,6 +25,7 @@ func New() *cobra.Command { cmd.AddCommand(newInitCommand()) cmd.AddCommand(newSummaryCommand()) cmd.AddCommand(newGenerateCommand()) + cmd.AddCommand(newDebugCommand()) cmd.AddCommand(deployment.NewDeploymentCommand()) return cmd } diff --git a/cmd/bundle/debug.go b/cmd/bundle/debug.go new file mode 100644 index 000000000..42d16eab5 --- /dev/null +++ b/cmd/bundle/debug.go @@ -0,0 +1,18 @@ +package bundle + +import ( + "github.com/databricks/cli/cmd/bundle/debug" + "github.com/spf13/cobra" +) + +func newDebugCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "debug", + Short: "Debug information 
about bundles", + Long: "Debug information about bundles", + // This command group is currently intended for the Databricks VSCode extension only + Hidden: true, + } + cmd.AddCommand(debug.NewTerraformCommand()) + return cmd +} diff --git a/cmd/bundle/debug/terraform.go b/cmd/bundle/debug/terraform.go new file mode 100644 index 000000000..843ecac4e --- /dev/null +++ b/cmd/bundle/debug/terraform.go @@ -0,0 +1,78 @@ +package debug + +import ( + "encoding/json" + "fmt" + + "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/spf13/cobra" +) + +type Dependencies struct { + Terraform *terraform.TerraformMetadata `json:"terraform"` +} + +func NewTerraformCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "terraform", + Short: "Prints Terraform dependencies required for the bundle commands", + Args: root.NoArgs, + Annotations: map[string]string{ + "template": `Terraform version: {{.Version}} +Terraform URL: https://releases.hashicorp.com/terraform/{{.Version}} + +Databricks Terraform Provider version: {{.ProviderVersion}} +Databricks Terraform Provider URL: https://github.com/databricks/terraform-provider-databricks/releases/tag/v{{.ProviderVersion}} + +Databricks CLI downloads its Terraform dependencies automatically. + +If you run the CLI in an air-gapped environment, you can download the dependencies manually and set these environment variables: + + DATABRICKS_TF_VERSION={{.Version}} + DATABRICKS_TF_EXEC_PATH=/path/to/terraform/binary + DATABRICKS_TF_PROVIDER_VERSION={{.ProviderVersion}} + DATABRICKS_TF_CLI_CONFIG_FILE=/path/to/terraform/cli/config.tfrc + +Here is an example *.tfrc configuration file: + + disable_checkpoint = true + provider_installation { + filesystem_mirror { + path = "/path/to/a/folder/with/databricks/terraform/provider" + } + } + +The filesystem mirror path should point to the folder with the Databricks Terraform Provider. The folder should have this structure: /{{.ProviderHost}}/{{.ProviderSource}}/terraform-provider-databricks_{{.ProviderVersion}}_ARCH.zip + +For more information about filesystem mirrors, see the Terraform documentation: https://developer.hashicorp.com/terraform/cli/config/config-file#filesystem_mirror +`, + }, + // This command is currently intended for the Databricks VSCode extension only + Hidden: true, + } + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + dependencies := &Dependencies{ + Terraform: terraform.NewTerraformMetadata(), + } + switch root.OutputType(cmd) { + case flags.OutputText: + cmdio.Render(cmd.Context(), dependencies.Terraform) + case flags.OutputJSON: + buf, err := json.MarshalIndent(dependencies, "", " ") + if err != nil { + return err + } + cmd.OutOrStdout().Write(buf) + default: + return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) + } + + return nil + } + + return cmd +} From 8c144a2de4a160fc2bb1a0587f6177b68ee314dd Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 3 Apr 2024 10:14:04 +0200 Subject: [PATCH 110/286] Added `auth describe` command (#1244) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This command provide details on auth configuration user is using as well as authenticated user and auth mechanism used. 
Relies on https://github.com/databricks/databricks-sdk-go/pull/838 (tests will fail until merged) Examples of output ``` Workspace: https://test.com User: andrew.nester@databricks.com Authenticated with: pat ----- Configuration: ✓ auth_type: pat ✓ host: https://test.com (from bundle) ✓ profile: DEFAULT (from --profile flag) ✓ token: ******** (from /Users/andrew.nester/.databrickscfg config file) ``` ``` DATABRICKS_AUTH_TYPE=azure-msi databricks auth describe -p "Azure 2" Unable to authenticate: inner token: Post "https://foobar.com/oauth2/token": AADSTS900023: Specified tenant identifier foobar_aaaaaaa' is neither a valid DNS name, nor a valid external domain. See https://login.microsoftonline.com/error?code=900023 ----- Configuration: ✓ auth_type: azure-msi (from DATABRICKS_AUTH_TYPE environment variable) ✓ azure_client_id: 8470f3ba-aaaa-bbbb-cccc-xxxxyyyyzzzz (from /Users/andrew.nester/.databrickscfg config file) ~ azure_client_secret: ******** (from /Users/andrew.nester/.databrickscfg config file, not used for auth type azure-msi) ~ azure_tenant_id: foobar_aaaaaaa (from /Users/andrew.nester/.databrickscfg config file, not used for auth type azure-msi) ✓ azure_use_msi: true (from /Users/andrew.nester/.databrickscfg config file) ✓ host: https://foobar.com (from /Users/andrew.nester/.databrickscfg config file) ✓ profile: Azure 2 (from --profile flag) ``` For account ``` Unable to authenticate: default auth: databricks-cli: cannot get access token: Error: token refresh: Post "https://xxxxxxx.com/v1/token": http 400: {"error":"invalid_request","error_description":"Refresh token is invalid"} . Config: host=https://xxxxxxx.com, account_id=ed0ca3c5-fae5-4619-bb38-eebe04a4af4b, profile=ACCOUNT-ed0ca3c5-fae5-4619-bb38-eebe04a4af4b ----- Configuration: ✓ account_id: ed0ca3c5-fae5-4619-bb38-eebe04a4af4b (from /Users/andrew.nester/.databrickscfg config file) ✓ auth_type: databricks-cli (from /Users/andrew.nester/.databrickscfg config file) ✓ host: https://xxxxxxxxx.com (from /Users/andrew.nester/.databrickscfg config file) ✓ profile: ACCOUNT-ed0ca3c5-fae5-4619-bb38-eebe04a4af4b ``` ## Tests Added unit tests --------- Co-authored-by: Julia Crawford (Databricks) --- bundle/config/workspace.go | 21 +++- cmd/auth/auth.go | 1 + cmd/auth/describe.go | 192 +++++++++++++++++++++++++++++++ cmd/auth/describe_test.go | 217 +++++++++++++++++++++++++++++++++++ cmd/root/auth.go | 71 +++++++++++- cmd/root/auth_test.go | 78 +++++++++++++ libs/cmdio/render.go | 6 + libs/databrickscfg/loader.go | 7 +- 8 files changed, 583 insertions(+), 10 deletions(-) create mode 100644 cmd/auth/describe.go create mode 100644 cmd/auth/describe_test.go diff --git a/bundle/config/workspace.go b/bundle/config/workspace.go index 5f8691bab..efc5caa66 100644 --- a/bundle/config/workspace.go +++ b/bundle/config/workspace.go @@ -78,8 +78,8 @@ func (s User) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { - cfg := config.Config{ +func (w *Workspace) Config() *config.Config { + cfg := &config.Config{ // Generic Host: w.Host, Profile: w.Profile, @@ -101,6 +101,19 @@ func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { AzureLoginAppID: w.AzureLoginAppID, } + for k := range config.ConfigAttributes { + attr := &config.ConfigAttributes[k] + if !attr.IsZero(cfg) { + cfg.SetAttrSource(attr, config.Source{Type: config.SourceType("bundle")}) + } + } + + return cfg +} + +func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { + cfg := w.Config() + 
// If only the host is configured, we try and unambiguously match it to // a profile in the user's databrickscfg file. Override the default loaders. if w.Host != "" && w.Profile == "" { @@ -124,13 +137,13 @@ func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { // Now that the configuration is resolved, we can verify that the host in the bundle configuration // is identical to the host associated with the selected profile. if w.Host != "" && w.Profile != "" { - err := databrickscfg.ValidateConfigAndProfileHost(&cfg, w.Profile) + err := databrickscfg.ValidateConfigAndProfileHost(cfg, w.Profile) if err != nil { return nil, err } } - return databricks.NewWorkspaceClient((*databricks.Config)(&cfg)) + return databricks.NewWorkspaceClient((*databricks.Config)(cfg)) } func init() { diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index e0c7c7c5b..59de76111 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -22,6 +22,7 @@ func New() *cobra.Command { cmd.AddCommand(newLoginCommand(&perisistentAuth)) cmd.AddCommand(newProfilesCommand()) cmd.AddCommand(newTokenCommand(&perisistentAuth)) + cmd.AddCommand(newDescribeCommand()) return cmd } diff --git a/cmd/auth/describe.go b/cmd/auth/describe.go new file mode 100644 index 000000000..125b0731a --- /dev/null +++ b/cmd/auth/describe.go @@ -0,0 +1,192 @@ +package auth + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/config" + "github.com/spf13/cobra" +) + +var authTemplate = `{{"Host:" | bold}} {{.Details.Host}} +{{- if .AccountID}} +{{"Account ID:" | bold}} {{.AccountID}} +{{- end}} +{{- if .Username}} +{{"User:" | bold}} {{.Username}} +{{- end}} +{{"Authenticated with:" | bold}} {{.Details.AuthType}} +----- +` + configurationTemplate + +var errorTemplate = `Unable to authenticate: {{.Error}} +----- +` + configurationTemplate + +const configurationTemplate = `Current configuration: + {{- $details := .Status.Details}} + {{- range $a := .ConfigAttributes}} + {{- $k := $a.Name}} + {{- if index $details.Configuration $k}} + {{- $v := index $details.Configuration $k}} + {{if $v.AuthTypeMismatch}}~{{else}}✓{{end}} {{$k | bold}}: {{$v.Value}} + {{- if not (eq $v.Source.String "dynamic configuration")}} + {{- " (from" | italic}} {{$v.Source.String | italic}} + {{- if $v.AuthTypeMismatch}}, {{ "not used for auth type " | red | italic }}{{$details.AuthType | red | italic}}{{end}}) + {{- end}} + {{- end}} + {{- end}} +` + +func newDescribeCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "describe", + Short: "Describes the credentials and the source of those credentials, being used by the CLI to authenticate", + } + + var showSensitive bool + cmd.Flags().BoolVar(&showSensitive, "sensitive", false, "Include sensitive fields like passwords and tokens in the output") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + var status *authStatus + var err error + status, err = getAuthStatus(cmd, args, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + isAccount, err := root.MustAnyClient(cmd, args) + return root.ConfigUsed(cmd.Context()), isAccount, err + }) + + if err != nil { + return err + } + + if status.Error != nil { + return render(ctx, cmd, status, errorTemplate) + } + + return render(ctx, cmd, status, authTemplate) + } + + return cmd +} + +type tryAuth func(cmd *cobra.Command, args []string) 
(*config.Config, bool, error) + +func getAuthStatus(cmd *cobra.Command, args []string, showSensitive bool, fn tryAuth) (*authStatus, error) { + cfg, isAccount, err := fn(cmd, args) + ctx := cmd.Context() + if err != nil { + return &authStatus{ + Status: "error", + Error: err, + Details: getAuthDetails(cmd, cfg, showSensitive), + }, nil + } + + if isAccount { + a := root.AccountClient(ctx) + + // Doing a simple API call to check if the auth is valid + _, err := a.Workspaces.List(ctx) + if err != nil { + return &authStatus{ + Status: "error", + Error: err, + Details: getAuthDetails(cmd, cfg, showSensitive), + }, nil + } + + status := authStatus{ + Status: "success", + Details: getAuthDetails(cmd, a.Config, showSensitive), + AccountID: a.Config.AccountID, + Username: a.Config.Username, + } + + return &status, nil + } + + w := root.WorkspaceClient(ctx) + me, err := w.CurrentUser.Me(ctx) + if err != nil { + return &authStatus{ + Status: "error", + Error: err, + Details: getAuthDetails(cmd, cfg, showSensitive), + }, nil + } + + status := authStatus{ + Status: "success", + Details: getAuthDetails(cmd, w.Config, showSensitive), + Username: me.UserName, + } + + return &status, nil +} + +func render(ctx context.Context, cmd *cobra.Command, status *authStatus, template string) error { + switch root.OutputType(cmd) { + case flags.OutputText: + return cmdio.RenderWithTemplate(ctx, map[string]any{ + "Status": status, + "ConfigAttributes": config.ConfigAttributes, + }, "", template) + case flags.OutputJSON: + buf, err := json.MarshalIndent(status, "", " ") + if err != nil { + return err + } + cmd.OutOrStdout().Write(buf) + default: + return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) + } + + return nil +} + +type authStatus struct { + Status string `json:"status"` + Error error `json:"error,omitempty"` + Username string `json:"username,omitempty"` + AccountID string `json:"account_id,omitempty"` + Details config.AuthDetails `json:"details"` +} + +func getAuthDetails(cmd *cobra.Command, cfg *config.Config, showSensitive bool) config.AuthDetails { + var opts []config.AuthDetailsOptions + if showSensitive { + opts = append(opts, config.ShowSensitive) + } + details := cfg.GetAuthDetails(opts...) 
+ + for k, v := range details.Configuration { + if k == "profile" && cmd.Flag("profile").Changed { + v.Source = config.Source{Type: config.SourceType("flag"), Name: "--profile"} + } + + if k == "host" && cmd.Flag("host").Changed { + v.Source = config.Source{Type: config.SourceType("flag"), Name: "--host"} + } + } + + // If profile is not set explicitly, default to "default" + if _, ok := details.Configuration["profile"]; !ok { + profile := cfg.Profile + if profile == "" { + profile = "default" + } + details.Configuration["profile"] = &config.AttrConfig{Value: profile, Source: config.Source{Type: config.SourceDynamicConfig}} + } + + // Unset source for databricks_cli_path because it can't be overridden anyway + if v, ok := details.Configuration["databricks_cli_path"]; ok { + v.Source = config.Source{Type: config.SourceDynamicConfig} + } + + return details +} diff --git a/cmd/auth/describe_test.go b/cmd/auth/describe_test.go new file mode 100644 index 000000000..d0260abc7 --- /dev/null +++ b/cmd/auth/describe_test.go @@ -0,0 +1,217 @@ +package auth + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/spf13/cobra" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestGetWorkspaceAuthStatus(t *testing.T) { + ctx := context.Background() + m := mocks.NewMockWorkspaceClient(t) + ctx = root.SetWorkspaceClient(ctx, m.WorkspaceClient) + + cmd := &cobra.Command{} + cmd.SetContext(ctx) + + showSensitive := false + + currentUserApi := m.GetMockCurrentUserAPI() + currentUserApi.EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "test-user", + }, nil) + + cmd.Flags().String("host", "", "") + cmd.Flags().String("profile", "", "") + cmd.Flag("profile").Value.Set("my-profile") + cmd.Flag("profile").Changed = true + + cfg := &config.Config{ + Profile: "my-profile", + } + m.WorkspaceClient.Config = cfg + t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") + config.ConfigAttributes.Configure(cfg) + + status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + "host": "https://test.com", + "token": "test-token", + "auth_type": "azure-cli", + }) + return cfg, false, nil + }) + require.NoError(t, err) + require.NotNil(t, status) + require.Equal(t, "success", status.Status) + require.Equal(t, "test-user", status.Username) + require.Equal(t, "https://test.com", status.Details.Host) + require.Equal(t, "azure-cli", status.Details.AuthType) + + require.Equal(t, "azure-cli", status.Details.Configuration["auth_type"].Value) + require.Equal(t, "DATABRICKS_AUTH_TYPE environment variable", status.Details.Configuration["auth_type"].Source.String()) + require.False(t, status.Details.Configuration["auth_type"].AuthTypeMismatch) + + require.Equal(t, "********", status.Details.Configuration["token"].Value) + require.Equal(t, "dynamic configuration", status.Details.Configuration["token"].Source.String()) + require.True(t, status.Details.Configuration["token"].AuthTypeMismatch) + + require.Equal(t, "my-profile", status.Details.Configuration["profile"].Value) + require.Equal(t, "--profile flag", status.Details.Configuration["profile"].Source.String()) + require.False(t, status.Details.Configuration["profile"].AuthTypeMismatch) +} + +func 
TestGetWorkspaceAuthStatusError(t *testing.T) { + ctx := context.Background() + m := mocks.NewMockWorkspaceClient(t) + ctx = root.SetWorkspaceClient(ctx, m.WorkspaceClient) + + cmd := &cobra.Command{} + cmd.SetContext(ctx) + + showSensitive := false + + cmd.Flags().String("host", "", "") + cmd.Flags().String("profile", "", "") + cmd.Flag("profile").Value.Set("my-profile") + cmd.Flag("profile").Changed = true + + cfg := &config.Config{ + Profile: "my-profile", + } + m.WorkspaceClient.Config = cfg + t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") + config.ConfigAttributes.Configure(cfg) + + status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + "host": "https://test.com", + "token": "test-token", + "auth_type": "azure-cli", + }) + return cfg, false, fmt.Errorf("auth error") + }) + require.NoError(t, err) + require.NotNil(t, status) + require.Equal(t, "error", status.Status) + + require.Equal(t, "azure-cli", status.Details.Configuration["auth_type"].Value) + require.Equal(t, "DATABRICKS_AUTH_TYPE environment variable", status.Details.Configuration["auth_type"].Source.String()) + require.False(t, status.Details.Configuration["auth_type"].AuthTypeMismatch) + + require.Equal(t, "********", status.Details.Configuration["token"].Value) + require.Equal(t, "dynamic configuration", status.Details.Configuration["token"].Source.String()) + require.True(t, status.Details.Configuration["token"].AuthTypeMismatch) + + require.Equal(t, "my-profile", status.Details.Configuration["profile"].Value) + require.Equal(t, "--profile flag", status.Details.Configuration["profile"].Source.String()) + require.False(t, status.Details.Configuration["profile"].AuthTypeMismatch) +} + +func TestGetWorkspaceAuthStatusSensitive(t *testing.T) { + ctx := context.Background() + m := mocks.NewMockWorkspaceClient(t) + ctx = root.SetWorkspaceClient(ctx, m.WorkspaceClient) + + cmd := &cobra.Command{} + cmd.SetContext(ctx) + + showSensitive := true + + cmd.Flags().String("host", "", "") + cmd.Flags().String("profile", "", "") + cmd.Flag("profile").Value.Set("my-profile") + cmd.Flag("profile").Changed = true + + cfg := &config.Config{ + Profile: "my-profile", + } + m.WorkspaceClient.Config = cfg + t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") + config.ConfigAttributes.Configure(cfg) + + status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + "host": "https://test.com", + "token": "test-token", + "auth_type": "azure-cli", + }) + return cfg, false, fmt.Errorf("auth error") + }) + require.NoError(t, err) + require.NotNil(t, status) + require.Equal(t, "error", status.Status) + + require.Equal(t, "azure-cli", status.Details.Configuration["auth_type"].Value) + require.Equal(t, "DATABRICKS_AUTH_TYPE environment variable", status.Details.Configuration["auth_type"].Source.String()) + require.False(t, status.Details.Configuration["auth_type"].AuthTypeMismatch) + + require.Equal(t, "test-token", status.Details.Configuration["token"].Value) + require.Equal(t, "dynamic configuration", status.Details.Configuration["token"].Source.String()) + require.True(t, status.Details.Configuration["token"].AuthTypeMismatch) +} + +func TestGetAccountAuthStatus(t *testing.T) { + ctx := context.Background() + m := mocks.NewMockAccountClient(t) + ctx = 
root.SetAccountClient(ctx, m.AccountClient) + + cmd := &cobra.Command{} + cmd.SetContext(ctx) + + showSensitive := false + + cmd.Flags().String("host", "", "") + cmd.Flags().String("profile", "", "") + cmd.Flag("profile").Value.Set("my-profile") + cmd.Flag("profile").Changed = true + + cfg := &config.Config{ + Profile: "my-profile", + } + m.AccountClient.Config = cfg + t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") + config.ConfigAttributes.Configure(cfg) + + wsApi := m.GetMockWorkspacesAPI() + wsApi.EXPECT().List(mock.Anything).Return(nil, nil) + + status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + "account_id": "test-account-id", + "username": "test-user", + "host": "https://test.com", + "token": "test-token", + "auth_type": "azure-cli", + }) + return cfg, true, nil + }) + require.NoError(t, err) + require.NotNil(t, status) + require.Equal(t, "success", status.Status) + + require.Equal(t, "test-user", status.Username) + require.Equal(t, "https://test.com", status.Details.Host) + require.Equal(t, "azure-cli", status.Details.AuthType) + require.Equal(t, "test-account-id", status.AccountID) + + require.Equal(t, "azure-cli", status.Details.Configuration["auth_type"].Value) + require.Equal(t, "DATABRICKS_AUTH_TYPE environment variable", status.Details.Configuration["auth_type"].Source.String()) + require.False(t, status.Details.Configuration["auth_type"].AuthTypeMismatch) + + require.Equal(t, "********", status.Details.Configuration["token"].Value) + require.Equal(t, "dynamic configuration", status.Details.Configuration["token"].Source.String()) + require.True(t, status.Details.Configuration["token"].AuthTypeMismatch) + + require.Equal(t, "my-profile", status.Details.Configuration["profile"].Value) + require.Equal(t, "--profile flag", status.Details.Configuration["profile"].Source.String()) + require.False(t, status.Details.Configuration["profile"].AuthTypeMismatch) +} diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 0edfaaa83..387b67f0d 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -17,6 +17,23 @@ import ( // Placeholders to use as unique keys in context.Context. var workspaceClient int var accountClient int +var configUsed int + +type ErrNoWorkspaceProfiles struct { + path string +} + +func (e ErrNoWorkspaceProfiles) Error() string { + return fmt.Sprintf("%s does not contain workspace profiles; please create one by running 'databricks configure'", e.path) +} + +type ErrNoAccountProfiles struct { + path string +} + +func (e ErrNoAccountProfiles) Error() string { + return fmt.Sprintf("%s does not contain account profiles", e.path) +} func initProfileFlag(cmd *cobra.Command) { cmd.PersistentFlags().StringP("profile", "p", "", "~/.databrickscfg profile") @@ -67,6 +84,29 @@ func accountClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt return a, err } +func MustAnyClient(cmd *cobra.Command, args []string) (bool, error) { + // Try to create a workspace client + werr := MustWorkspaceClient(cmd, args) + if werr == nil { + return false, nil + } + + // If the error is other than "not a workspace client error" or "no workspace profiles", + // return it because configuration is for workspace client + // and we don't want to try to create an account client. 
+ if !errors.Is(werr, databricks.ErrNotWorkspaceClient) && !errors.As(werr, &ErrNoWorkspaceProfiles{}) { + return false, werr + } + + // Otherwise, the config used is account client one, so try to create an account client + aerr := MustAccountClient(cmd, args) + if errors.As(aerr, &ErrNoAccountProfiles{}) { + return false, aerr + } + + return true, aerr +} + func MustAccountClient(cmd *cobra.Command, args []string) error { cfg := &config.Config{} @@ -76,6 +116,10 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { cfg.Profile = profile } + ctx := cmd.Context() + ctx = context.WithValue(ctx, &configUsed, cfg) + cmd.SetContext(ctx) + if cfg.Profile == "" { // account-level CLI was not really done before, so here are the assumptions: // 1. only admins will have account configured @@ -98,7 +142,8 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { return err } - cmd.SetContext(context.WithValue(cmd.Context(), &accountClient, a)) + ctx = context.WithValue(ctx, &accountClient, a) + cmd.SetContext(ctx) return nil } @@ -146,13 +191,20 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { cfg.Profile = profile } + ctx := cmd.Context() + ctx = context.WithValue(ctx, &configUsed, cfg) + cmd.SetContext(ctx) + // Try to load a bundle configuration if we're allowed to by the caller (see `./auth_options.go`). if !shouldSkipLoadBundle(cmd.Context()) { b, diags := TryConfigureBundle(cmd) if err := diags.Error(); err != nil { return err } + if b != nil { + ctx = context.WithValue(ctx, &configUsed, b.Config.Workspace.Config()) + cmd.SetContext(ctx) client, err := b.InitializeWorkspaceClient() if err != nil { return err @@ -167,7 +219,6 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { return err } - ctx := cmd.Context() ctx = context.WithValue(ctx, &workspaceClient, w) cmd.SetContext(ctx) return nil @@ -177,6 +228,10 @@ func SetWorkspaceClient(ctx context.Context, w *databricks.WorkspaceClient) cont return context.WithValue(ctx, &workspaceClient, w) } +func SetAccountClient(ctx context.Context, a *databricks.AccountClient) context.Context { + return context.WithValue(ctx, &accountClient, a) +} + func AskForWorkspaceProfile(ctx context.Context) (string, error) { path, err := databrickscfg.GetPath(ctx) if err != nil { @@ -188,7 +243,7 @@ func AskForWorkspaceProfile(ctx context.Context) (string, error) { } switch len(profiles) { case 0: - return "", fmt.Errorf("%s does not contain workspace profiles; please create one by running 'databricks configure'", path) + return "", ErrNoWorkspaceProfiles{path: path} case 1: return profiles[0].Name, nil } @@ -221,7 +276,7 @@ func AskForAccountProfile(ctx context.Context) (string, error) { } switch len(profiles) { case 0: - return "", fmt.Errorf("%s does not contain account profiles; please create one by running 'databricks configure'", path) + return "", ErrNoAccountProfiles{path} case 1: return profiles[0].Name, nil } @@ -269,3 +324,11 @@ func AccountClient(ctx context.Context) *databricks.AccountClient { } return a } + +func ConfigUsed(ctx context.Context) *config.Config { + cfg, ok := ctx.Value(&configUsed).(*config.Config) + if !ok { + panic("cannot get *config.Config. 
Please report it as a bug") + } + return cfg +} diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 7864c254e..486f587ef 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -229,3 +229,81 @@ func TestMustAccountClientErrorsWithNoDatabricksCfg(t *testing.T) { err := MustAccountClient(cmd, []string{}) require.ErrorContains(t, err, "no configuration file found at") } + +func TestMustAnyClientCanCreateWorkspaceClient(t *testing.T) { + testutil.CleanupEnvironment(t) + + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + err := os.WriteFile( + configFile, + []byte(` + [workspace-1111] + host = https://adb-1111.11.azuredatabricks.net/ + token = foobar + `), + 0755) + require.NoError(t, err) + + ctx, tt := cmdio.SetupTest(context.Background()) + t.Cleanup(tt.Done) + cmd := New(ctx) + + t.Setenv("DATABRICKS_CONFIG_FILE", configFile) + isAccount, err := MustAnyClient(cmd, []string{}) + require.False(t, isAccount) + require.NoError(t, err) + + w := WorkspaceClient(cmd.Context()) + require.NotNil(t, w) +} + +func TestMustAnyClientCanCreateAccountClient(t *testing.T) { + testutil.CleanupEnvironment(t) + + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + err := os.WriteFile( + configFile, + []byte(` + [account-1111] + host = https://accounts.azuredatabricks.net/ + account_id = 1111 + token = foobar + `), + 0755) + require.NoError(t, err) + + ctx, tt := cmdio.SetupTest(context.Background()) + t.Cleanup(tt.Done) + cmd := New(ctx) + + t.Setenv("DATABRICKS_CONFIG_FILE", configFile) + isAccount, err := MustAnyClient(cmd, []string{}) + require.NoError(t, err) + require.True(t, isAccount) + + a := AccountClient(cmd.Context()) + require.NotNil(t, a) +} + +func TestMustAnyClientWithEmptyDatabricksCfg(t *testing.T) { + testutil.CleanupEnvironment(t) + + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + err := os.WriteFile( + configFile, + []byte(""), // empty file + 0755) + require.NoError(t, err) + + ctx, tt := cmdio.SetupTest(context.Background()) + t.Cleanup(tt.Done) + cmd := New(ctx) + + t.Setenv("DATABRICKS_CONFIG_FILE", configFile) + + _, err = MustAnyClient(cmd, []string{}) + require.ErrorContains(t, err, "does not contain account profiles") +} diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index 40cdde354..ec851b8ff 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -306,6 +306,12 @@ func renderUsingTemplate(ctx context.Context, r templateRenderer, w io.Writer, h "yellow": color.YellowString, "magenta": color.MagentaString, "cyan": color.CyanString, + "bold": func(format string, a ...interface{}) string { + return color.New(color.Bold).Sprintf(format, a...) + }, + "italic": func(format string, a ...interface{}) string { + return color.New(color.Italic).Sprintf(format, a...) 
+ }, "replace": strings.ReplaceAll, "join": strings.Join, "bool": func(v bool) string { diff --git a/libs/databrickscfg/loader.go b/libs/databrickscfg/loader.go index 1dc2a9452..2e22ee950 100644 --- a/libs/databrickscfg/loader.go +++ b/libs/databrickscfg/loader.go @@ -98,7 +98,10 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { } log.Debugf(ctx, "Loading profile %s because of host match", match.Name()) - err = config.ConfigAttributes.ResolveFromStringMap(cfg, match.KeysHash()) + err = config.ConfigAttributes.ResolveFromStringMapWithSource(cfg, match.KeysHash(), config.Source{ + Type: config.SourceFile, + Name: configFile.Path(), + }) if err != nil { return fmt.Errorf("%s %s profile: %w", configFile.Path(), match.Name(), err) } @@ -110,7 +113,7 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { func (l profileFromHostLoader) isAnyAuthConfigured(cfg *config.Config) bool { // If any of the auth-specific attributes are set, we can skip profile resolution. for _, a := range config.ConfigAttributes { - if a.Auth == "" { + if !a.HasAuthAttribute() { continue } if !a.IsZero(cfg) { From c1963ec0df16f37ae0bc2b3610a4d864d9f14df3 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Apr 2024 10:56:46 +0200 Subject: [PATCH 111/286] Include `dyn.Path` in normalization warnings and errors (#1332) ## Changes This adds context to warnings and errors. For example: * Summary: `unknown field bar` * Location: `foo.yml:6:10` * Path: `.targets.dev.workspace` ## Tests Unit tests. --- libs/diag/diagnostic.go | 4 ++ libs/dyn/convert/normalize.go | 83 ++++++++++++++++-------------- libs/dyn/convert/normalize_test.go | 18 +++++++ 3 files changed, 66 insertions(+), 39 deletions(-) diff --git a/libs/diag/diagnostic.go b/libs/diag/diagnostic.go index 68b4ad611..ddb3af387 100644 --- a/libs/diag/diagnostic.go +++ b/libs/diag/diagnostic.go @@ -20,6 +20,10 @@ type Diagnostic struct { // Location is a source code location associated with the diagnostic message. // It may be zero if there is no associated location. Location dyn.Location + + // Path is a path to the value in a configuration tree that the diagnostic is associated with. + // It may be nil if there is no associated path. + Path dyn.Path } // Errorf creates a new error diagnostic. 
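For reference, a minimal sketch (not part of this patch) of how a diagnostic carrying the new `Path` field can be built and rendered. It only assumes the `diag` and `dyn` APIs visible in the hunks above and in the tests below; the `main` wrapper and the printed format are hypothetical.

```go
// Illustrative only: construct a normalization warning that carries the three
// pieces of context mentioned in the PR description (summary, location, path).
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/diag"
	"github.com/databricks/cli/libs/dyn"
)

func main() {
	d := diag.Diagnostic{
		Severity: diag.Warning,
		Summary:  "unknown field: bar",
		Location: dyn.Location{File: "foo.yml", Line: 6, Column: 10},
		Path:     dyn.NewPath(dyn.Key("targets"), dyn.Key("dev"), dyn.Key("workspace")),
	}

	// Print summary, source location, and configuration path.
	fmt.Printf("warning: %s\n  at %v\n  path %v\n", d.Summary, d.Location, d.Path)
}
```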
diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index ff4d94b88..7d6bde051 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -33,51 +33,53 @@ func Normalize(dst any, src dyn.Value, opts ...NormalizeOption) (dyn.Value, diag } } - return n.normalizeType(reflect.TypeOf(dst), src, []reflect.Type{}) + return n.normalizeType(reflect.TypeOf(dst), src, []reflect.Type{}, dyn.EmptyPath) } -func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value, seen []reflect.Type) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value, seen []reflect.Type, path dyn.Path) (dyn.Value, diag.Diagnostics) { for typ.Kind() == reflect.Pointer { typ = typ.Elem() } switch typ.Kind() { case reflect.Struct: - return n.normalizeStruct(typ, src, append(seen, typ)) + return n.normalizeStruct(typ, src, append(seen, typ), path) case reflect.Map: - return n.normalizeMap(typ, src, append(seen, typ)) + return n.normalizeMap(typ, src, append(seen, typ), path) case reflect.Slice: - return n.normalizeSlice(typ, src, append(seen, typ)) + return n.normalizeSlice(typ, src, append(seen, typ), path) case reflect.String: - return n.normalizeString(typ, src) + return n.normalizeString(typ, src, path) case reflect.Bool: - return n.normalizeBool(typ, src) + return n.normalizeBool(typ, src, path) case reflect.Int, reflect.Int32, reflect.Int64: - return n.normalizeInt(typ, src) + return n.normalizeInt(typ, src, path) case reflect.Float32, reflect.Float64: - return n.normalizeFloat(typ, src) + return n.normalizeFloat(typ, src, path) } return dyn.InvalidValue, diag.Errorf("unsupported type: %s", typ.Kind()) } -func nullWarning(expected dyn.Kind, src dyn.Value) diag.Diagnostic { +func nullWarning(expected dyn.Kind, src dyn.Value, path dyn.Path) diag.Diagnostic { return diag.Diagnostic{ Severity: diag.Warning, Summary: fmt.Sprintf("expected a %s value, found null", expected), Location: src.Location(), + Path: path, } } -func typeMismatch(expected dyn.Kind, src dyn.Value) diag.Diagnostic { +func typeMismatch(expected dyn.Kind, src dyn.Value, path dyn.Path) diag.Diagnostic { return diag.Diagnostic{ Severity: diag.Error, Summary: fmt.Sprintf("expected %s, found %s", expected, src.Kind()), Location: src.Location(), + Path: path, } } -func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen []reflect.Type) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen []reflect.Type, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -93,12 +95,13 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen Severity: diag.Warning, Summary: fmt.Sprintf("unknown field: %s", pk.MustString()), Location: pk.Location(), + Path: path, }) continue } // Normalize the value according to the field type. - nv, err := n.normalizeType(typ.FieldByIndex(index).Type, pv, seen) + nv, err := n.normalizeType(typ.FieldByIndex(index).Type, pv, seen, path.Append(dyn.Key(pk.MustString()))) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. 
@@ -136,17 +139,17 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen var v dyn.Value switch ftyp.Kind() { case reflect.Struct, reflect.Map: - v, _ = n.normalizeType(ftyp, dyn.V(map[string]dyn.Value{}), seen) + v, _ = n.normalizeType(ftyp, dyn.V(map[string]dyn.Value{}), seen, path.Append(dyn.Key(k))) case reflect.Slice: - v, _ = n.normalizeType(ftyp, dyn.V([]dyn.Value{}), seen) + v, _ = n.normalizeType(ftyp, dyn.V([]dyn.Value{}), seen, path.Append(dyn.Key(k))) case reflect.String: - v, _ = n.normalizeType(ftyp, dyn.V(""), seen) + v, _ = n.normalizeType(ftyp, dyn.V(""), seen, path.Append(dyn.Key(k))) case reflect.Bool: - v, _ = n.normalizeType(ftyp, dyn.V(false), seen) + v, _ = n.normalizeType(ftyp, dyn.V(false), seen, path.Append(dyn.Key(k))) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - v, _ = n.normalizeType(ftyp, dyn.V(int64(0)), seen) + v, _ = n.normalizeType(ftyp, dyn.V(int64(0)), seen, path.Append(dyn.Key(k))) case reflect.Float32, reflect.Float64: - v, _ = n.normalizeType(ftyp, dyn.V(float64(0)), seen) + v, _ = n.normalizeType(ftyp, dyn.V(float64(0)), seen, path.Append(dyn.Key(k))) default: // Skip fields for which we do not have a natural [dyn.Value] equivalent. // For example, we don't handle reflect.Complex* and reflect.Uint* types. @@ -162,10 +165,10 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen return src, diags } - return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src, path)) } -func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []reflect.Type) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []reflect.Type, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -176,7 +179,7 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r pv := pair.Value // Normalize the value according to the map element type. - nv, err := n.normalizeType(typ.Elem(), pv, seen) + nv, err := n.normalizeType(typ.Elem(), pv, seen, path.Append(dyn.Key(pk.MustString()))) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. @@ -193,10 +196,10 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r return src, diags } - return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src, path)) } -func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen []reflect.Type) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen []reflect.Type, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -204,7 +207,7 @@ func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen [ out := make([]dyn.Value, 0, len(src.MustSequence())) for _, v := range src.MustSequence() { // Normalize the value according to the slice element type. - v, err := n.normalizeType(typ.Elem(), v, seen) + v, err := n.normalizeType(typ.Elem(), v, seen, path.Append(dyn.Index(len(out)))) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. 
@@ -221,10 +224,10 @@ func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen [ return src, diags } - return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindSequence, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindSequence, src, path)) } -func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out string @@ -239,15 +242,15 @@ func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value) (dyn. out = strconv.FormatFloat(src.MustFloat(), 'f', -1, 64) case dyn.KindNil: // Return a warning if the field is present but has a null value. - return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindString, src)) + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindString, src, path)) default: - return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindString, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindString, src, path)) } return dyn.NewValue(out, src.Location()), diags } -func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out bool @@ -268,19 +271,19 @@ func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Va } // Cannot interpret as a boolean. - return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src, path)) } case dyn.KindNil: // Return a warning if the field is present but has a null value. - return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindBool, src)) + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindBool, src, path)) default: - return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src, path)) } return dyn.NewValue(out, src.Location()), diags } -func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out int64 @@ -300,19 +303,20 @@ func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Val Severity: diag.Error, Summary: fmt.Sprintf("cannot parse %q as an integer", src.MustString()), Location: src.Location(), + Path: path, }) } case dyn.KindNil: // Return a warning if the field is present but has a null value. 
- return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindInt, src)) + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindInt, src, path)) default: - return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindInt, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindInt, src, path)) } return dyn.NewValue(out, src.Location()), diags } -func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out float64 @@ -332,13 +336,14 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.V Severity: diag.Error, Summary: fmt.Sprintf("cannot parse %q as a floating point number", src.MustString()), Location: src.Location(), + Path: path, }) } case dyn.KindNil: // Return a warning if the field is present but has a null value. - return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindFloat, src)) + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindFloat, src, path)) default: - return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindFloat, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindFloat, src, path)) } return dyn.NewValue(out, src.Location()), diags diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 66e781bb8..856797968 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -43,6 +43,7 @@ func TestNormalizeStructElementDiagnostic(t *testing.T) { Severity: diag.Error, Summary: `expected string, found map`, Location: dyn.Location{}, + Path: dyn.NewPath(dyn.Key("bar")), }, err[0]) // Elements that encounter an error during normalization are dropped. @@ -68,6 +69,7 @@ func TestNormalizeStructUnknownField(t *testing.T) { Severity: diag.Warning, Summary: `unknown field: bar`, Location: vin.Get("foo").Location(), + Path: dyn.EmptyPath, }, err[0]) // The field that can be mapped to the struct field is retained. @@ -101,6 +103,7 @@ func TestNormalizeStructError(t *testing.T) { Severity: diag.Error, Summary: `expected map, found string`, Location: vin.Get("foo").Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -245,6 +248,7 @@ func TestNormalizeMapElementDiagnostic(t *testing.T) { Severity: diag.Error, Summary: `expected string, found map`, Location: dyn.Location{}, + Path: dyn.NewPath(dyn.Key("bar")), }, err[0]) // Elements that encounter an error during normalization are dropped. @@ -270,6 +274,7 @@ func TestNormalizeMapError(t *testing.T) { Severity: diag.Error, Summary: `expected map, found string`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -333,6 +338,7 @@ func TestNormalizeSliceElementDiagnostic(t *testing.T) { Severity: diag.Error, Summary: `expected string, found map`, Location: dyn.Location{}, + Path: dyn.NewPath(dyn.Index(2)), }, err[0]) // Elements that encounter an error during normalization are dropped. 
@@ -356,6 +362,7 @@ func TestNormalizeSliceError(t *testing.T) { Severity: diag.Error, Summary: `expected sequence, found string`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -410,6 +417,7 @@ func TestNormalizeStringNil(t *testing.T) { Severity: diag.Warning, Summary: `expected a string value, found null`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -446,6 +454,7 @@ func TestNormalizeStringError(t *testing.T) { Severity: diag.Error, Summary: `expected string, found map`, Location: dyn.Location{}, + Path: dyn.EmptyPath, }, err[0]) } @@ -466,6 +475,7 @@ func TestNormalizeBoolNil(t *testing.T) { Severity: diag.Warning, Summary: `expected a bool value, found null`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -507,6 +517,7 @@ func TestNormalizeBoolFromStringError(t *testing.T) { Severity: diag.Error, Summary: `expected bool, found string`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -519,6 +530,7 @@ func TestNormalizeBoolError(t *testing.T) { Severity: diag.Error, Summary: `expected bool, found map`, Location: dyn.Location{}, + Path: dyn.EmptyPath, }, err[0]) } @@ -539,6 +551,7 @@ func TestNormalizeIntNil(t *testing.T) { Severity: diag.Warning, Summary: `expected a int value, found null`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -567,6 +580,7 @@ func TestNormalizeIntFromStringError(t *testing.T) { Severity: diag.Error, Summary: `cannot parse "abc" as an integer`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -579,6 +593,7 @@ func TestNormalizeIntError(t *testing.T) { Severity: diag.Error, Summary: `expected int, found map`, Location: dyn.Location{}, + Path: dyn.EmptyPath, }, err[0]) } @@ -599,6 +614,7 @@ func TestNormalizeFloatNil(t *testing.T) { Severity: diag.Warning, Summary: `expected a float value, found null`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -627,6 +643,7 @@ func TestNormalizeFloatFromStringError(t *testing.T) { Severity: diag.Error, Summary: `cannot parse "abc" as a floating point number`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -639,5 +656,6 @@ func TestNormalizeFloatError(t *testing.T) { Severity: diag.Error, Summary: `expected float, found map`, Location: dyn.Location{}, + Path: dyn.EmptyPath, }, err[0]) } From f28a9d7107f0c8809ee87c1949cd063e7282ab05 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 12:39:53 +0200 Subject: [PATCH 112/286] Bump github.com/databricks/databricks-sdk-go from 0.36.0 to 0.37.0 (#1326) [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.36.0&new-version=0.37.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .../config/mutator/merge_job_clusters_test.go | 10 +- bundle/deploy/metadata/annotate_jobs.go | 2 +- bundle/deploy/metadata/annotate_jobs_test.go | 4 +- bundle/deploy/terraform/convert_test.go | 2 +- .../terraform/tfdyn/convert_job_test.go | 2 +- bundle/python/warning.go | 2 +- bundle/python/warning_test.go | 8 +- bundle/schema/docs/bundle_descriptions.json | 348 +++++++++++++----- cmd/bundle/generate/generate_test.go | 4 +- .../git-credentials/git-credentials.go | 4 +- .../ip-access-lists/ip-access-lists.go | 4 +- cmd/workspace/jobs/jobs.go | 22 +- cmd/workspace/lakeview/lakeview.go | 144 ++++++++ cmd/workspace/permissions/permissions.go | 9 +- go.mod | 2 +- go.sum | 4 +- 17 files changed, 443 insertions(+), 130 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index f26f23179..1d88bfb61 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -93763b0d7ae908520c229c786fff28b8fd623261 \ No newline at end of file +e316cc3d78d087522a74650e26586088da9ac8cb \ No newline at end of file diff --git a/bundle/config/mutator/merge_job_clusters_test.go b/bundle/config/mutator/merge_job_clusters_test.go index 3ddb2b63a..c9052c1f7 100644 --- a/bundle/config/mutator/merge_job_clusters_test.go +++ b/bundle/config/mutator/merge_job_clusters_test.go @@ -23,7 +23,7 @@ func TestMergeJobClusters(t *testing.T) { JobClusters: []jobs.JobCluster{ { JobClusterKey: "foo", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "13.3.x-scala2.12", NodeTypeId: "i3.xlarge", NumWorkers: 2, @@ -31,13 +31,13 @@ func TestMergeJobClusters(t *testing.T) { }, { JobClusterKey: "bar", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "10.4.x-scala2.12", }, }, { JobClusterKey: "foo", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ NodeTypeId: "i3.2xlarge", NumWorkers: 4, }, @@ -79,14 +79,14 @@ func TestMergeJobClustersWithNilKey(t *testing.T) { JobSettings: &jobs.JobSettings{ JobClusters: []jobs.JobCluster{ { - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "13.3.x-scala2.12", NodeTypeId: "i3.xlarge", NumWorkers: 2, }, }, { - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ NodeTypeId: "i3.2xlarge", NumWorkers: 4, }, diff --git a/bundle/deploy/metadata/annotate_jobs.go b/bundle/deploy/metadata/annotate_jobs.go index 372cbca13..2b03a59b7 100644 --- a/bundle/deploy/metadata/annotate_jobs.go +++ b/bundle/deploy/metadata/annotate_jobs.go @@ -29,7 +29,7 @@ func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnosti Kind: jobs.JobDeploymentKindBundle, MetadataFilePath: path.Join(b.Config.Workspace.StatePath, MetadataFileName), } - job.JobSettings.EditMode = jobs.JobSettingsEditModeUiLocked + job.JobSettings.EditMode = jobs.JobEditModeUiLocked job.JobSettings.Format = jobs.FormatMultiTask } diff --git a/bundle/deploy/metadata/annotate_jobs_test.go b/bundle/deploy/metadata/annotate_jobs_test.go index 8f2ab9c03..8dace4590 100644 --- a/bundle/deploy/metadata/annotate_jobs_test.go +++ b/bundle/deploy/metadata/annotate_jobs_test.go @@ -44,7 +44,7 @@ func TestAnnotateJobsMutator(t *testing.T) { MetadataFilePath: "/a/b/c/metadata.json", }, b.Config.Resources.Jobs["my-job-1"].JobSettings.Deployment) - assert.Equal(t, jobs.JobSettingsEditModeUiLocked, 
b.Config.Resources.Jobs["my-job-1"].EditMode) + assert.Equal(t, jobs.JobEditModeUiLocked, b.Config.Resources.Jobs["my-job-1"].EditMode) assert.Equal(t, jobs.FormatMultiTask, b.Config.Resources.Jobs["my-job-1"].Format) assert.Equal(t, @@ -53,7 +53,7 @@ func TestAnnotateJobsMutator(t *testing.T) { MetadataFilePath: "/a/b/c/metadata.json", }, b.Config.Resources.Jobs["my-job-2"].JobSettings.Deployment) - assert.Equal(t, jobs.JobSettingsEditModeUiLocked, b.Config.Resources.Jobs["my-job-2"].EditMode) + assert.Equal(t, jobs.JobEditModeUiLocked, b.Config.Resources.Jobs["my-job-2"].EditMode) assert.Equal(t, jobs.FormatMultiTask, b.Config.Resources.Jobs["my-job-2"].Format) } diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index fa59e092d..9621a56af 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -29,7 +29,7 @@ func TestBundleToTerraformJob(t *testing.T) { JobClusters: []jobs.JobCluster{ { JobClusterKey: "key", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "10.4.x-scala2.12", }, }, diff --git a/bundle/deploy/terraform/tfdyn/convert_job_test.go b/bundle/deploy/terraform/tfdyn/convert_job_test.go index 4e988b143..b9e1f967f 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_job_test.go @@ -21,7 +21,7 @@ func TestConvertJob(t *testing.T) { JobClusters: []jobs.JobCluster{ { JobClusterKey: "key", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "10.4.x-scala2.12", }, }, diff --git a/bundle/python/warning.go b/bundle/python/warning.go index 060509ad3..59c220a06 100644 --- a/bundle/python/warning.go +++ b/bundle/python/warning.go @@ -46,7 +46,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool { if task.JobClusterKey != "" { for _, job := range b.Config.Resources.Jobs { for _, cluster := range job.JobClusters { - if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster != nil { + if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster.SparkVersion != "" { if lowerThanExpectedVersion(ctx, cluster.NewCluster.SparkVersion) { return true } diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index f1fdf0bcf..990545ab4 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -63,13 +63,13 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) { JobClusters: []jobs.JobCluster{ { JobClusterKey: "cluster1", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "12.2.x-scala2.12", }, }, { JobClusterKey: "cluster2", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "13.1.x-scala2.12", }, }, @@ -157,13 +157,13 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { JobClusters: []jobs.JobCluster{ { JobClusterKey: "cluster1", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "12.2.x-scala2.12", }, }, { JobClusterKey: "cluster2", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "13.1.x-scala2.12", }, }, diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index c6b45a3eb..f1c887ae4 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -201,7 +201,7 @@ "description": "Deployment information for jobs managed by external sources.", "properties": { 
"kind": { - "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n" + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle." }, "metadata_file_path": { "description": "Path of the file that contains deployment metadata." @@ -212,7 +212,7 @@ "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding." }, "edit_mode": { - "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.\n" + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified." }, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", @@ -279,7 +279,7 @@ "description": "The source of the job specification in the remote repository when the job is source controlled.", "properties": { "dirty_state": { - "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.\n" + "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced." }, "import_from_git_branch": { "description": "Name of the branch which the job is imported from." @@ -322,7 +322,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { "description": "" @@ -652,7 +652,7 @@ } }, "max_concurrent_runs": { - "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. 
However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." + "description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." }, "name": { "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding." @@ -728,10 +728,10 @@ "description": "Whether this trigger is paused or not." }, "quartz_cron_expression": { - "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" + "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." }, "timezone_id": { - "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n" + "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required." } } }, @@ -756,7 +756,7 @@ "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference." }, "op": { - "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.\n" + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison." 
}, "right": { "description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference." @@ -779,13 +779,13 @@ "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." }, "project_directory": { - "description": "Path to the project directory. Optional for Git sourced tasks, in which case if no value is provided, the root of the Git repository is used." + "description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used." }, "schema": { "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -793,7 +793,7 @@ } }, "depends_on": { - "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n", + "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.", "items": { "description": "", "properties": { @@ -809,9 +809,15 @@ "description": { "description": "An optional description for this task." }, + "disable_auto_optimization": { + "description": "An option to disable auto optimization in serverless" + }, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." 
+ }, "on_duration_warning_threshold_exceeded": { "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", "items": { @@ -839,9 +845,11 @@ } }, "existing_cluster_id": { - "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. We suggest running jobs and tasks on new clusters for\ngreater reliability" + }, + "for_each_task": { + "description": "" }, - "for_each_task": null, "health": { "description": "", "properties": { @@ -868,7 +876,7 @@ "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." }, "libraries": { - "description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.", + "description": "An optional list of libraries to be installed on the cluster.\nThe default value is an empty list.", "items": { "description": "", "properties": { @@ -930,7 +938,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { "description": "" @@ -1260,16 +1268,16 @@ "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", "properties": { "base_parameters": { - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run\nNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.", "additionalproperties": { "description": "" } }, "notebook_path": { - "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" + "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, @@ -1291,7 +1299,7 @@ "description": "If pipeline_task, indicates that this task must execute a Pipeline.", "properties": { "full_refresh": { - "description": "If true, a full refresh will be triggered on the delta live table." + "description": "If true, triggers a full refresh on the delta live table." }, "pipeline_id": { "description": "The full name of the pipeline task to execute." @@ -1322,14 +1330,26 @@ } }, "retry_on_timeout": { - "description": "An optional policy to specify whether to retry a task when it times out." + "description": "An optional policy to specify whether to retry a job when it times out. The default behavior\nis to not retry on timeout." 
}, "run_if": { - "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed\n" + "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed" }, "run_job_task": { "description": "If run_job_task, indicates that this task must execute another job.", "properties": { + "dbt_commands": { + "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`", + "items": { + "description": "" + } + }, + "jar_params": { + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.", + "items": { + "description": "" + } + }, "job_id": { "description": "ID of the job to trigger." }, @@ -1338,6 +1358,44 @@ "additionalproperties": { "description": "" } + }, + "notebook_params": { + "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", + "additionalproperties": { + "description": "" + } + }, + "pipeline_params": { + "description": "", + "properties": { + "full_refresh": { + "description": "If true, triggers a full refresh on the delta live table." 
+ } + } + }, + "python_named_params": { + "description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.", + "additionalproperties": { + "description": "" + } + }, + "python_params": { + "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "items": { + "description": "" + } + }, + "spark_submit_params": { + "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "items": { + "description": "" + } + }, + "sql_params": { + "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.", + "additionalproperties": { + "description": "" + } } } }, @@ -1345,13 +1403,13 @@ "description": "If spark_jar_task, indicates that this task must run a JAR.", "properties": { "jar_uri": { - "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n" + "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create." }, "main_class_name": { "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." 
}, "parameters": { - "description": "Parameters passed to the main method.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", + "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -1362,7 +1420,7 @@ "description": "If spark_python_task, indicates that this task must run a Python file.", "properties": { "parameters": { - "description": "Command line parameters passed to the Python file.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -1371,15 +1429,15 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, "spark_submit_task": { - "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n", + "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. 
Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", "properties": { "parameters": { - "description": "Command-line parameters passed to spark submit.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -1449,7 +1507,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, @@ -1479,7 +1537,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -1540,13 +1598,13 @@ "description": "File arrival trigger settings.", "properties": { "min_time_between_triggers_seconds": { - "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n" + "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. 
The minimum allowed value is 60 seconds" }, "url": { - "description": "The storage location to monitor for file arrivals. The value must point to the root or a subpath of an external location URL or the root or subpath of a Unity Catalog volume." + "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location." }, "wait_after_last_change_seconds": { - "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n" + "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds." } } }, @@ -1554,13 +1612,13 @@ "description": "Whether this trigger is paused or not." }, "table": { - "description": "Table trigger settings.", + "description": "", "properties": { "condition": { "description": "The table(s) condition based on which to trigger a job run." }, "min_time_between_triggers_seconds": { - "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.\n" + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds." }, "table_names": { "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", @@ -1569,14 +1627,34 @@ } }, "wait_after_last_change_seconds": { - "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds.\n" + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds." + } + } + }, + "table_update": { + "description": "", + "properties": { + "condition": { + "description": "The table(s) condition based on which to trigger a job run." + }, + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds." + }, + "table_names": { + "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "items": { + "description": "" + } + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds." } } } } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. 
The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -1680,16 +1758,8 @@ } } }, - "anthropic_config": { - "description": "Anthropic Config. Only required if the provider is 'anthropic'.", - "properties": { - "anthropic_api_key": { - "description": "The Databricks secret key reference for an Anthropic API key." - } - } - }, - "aws_bedrock_config": { - "description": "AWS Bedrock Config. Only required if the provider is 'aws-bedrock'.", + "amazon_bedrock_config": { + "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", "properties": { "aws_access_key_id": { "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." @@ -1701,7 +1771,15 @@ "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." }, "bedrock_provider": { - "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + } + } + }, + "anthropic_config": { + "description": "Anthropic Config. Only required if the provider is 'anthropic'.", + "properties": { + "anthropic_api_key": { + "description": "The Databricks secret key reference for an Anthropic API key." } } }, @@ -1759,7 +1837,7 @@ } }, "provider": { - "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" }, "task": { "description": "The task type of the external model." @@ -2734,7 +2812,7 @@ "description": "Deployment information for jobs managed by external sources.", "properties": { "kind": { - "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n" + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle." }, "metadata_file_path": { "description": "Path of the file that contains deployment metadata." @@ -2745,7 +2823,7 @@ "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding." }, "edit_mode": { - "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.\n" + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified." 
}, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", @@ -2812,7 +2890,7 @@ "description": "The source of the job specification in the remote repository when the job is source controlled.", "properties": { "dirty_state": { - "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.\n" + "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced." }, "import_from_git_branch": { "description": "Name of the branch which the job is imported from." @@ -2855,7 +2933,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { "description": "" @@ -3185,7 +3263,7 @@ } }, "max_concurrent_runs": { - "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." + "description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." 
}, "name": { "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding." @@ -3261,10 +3339,10 @@ "description": "Whether this trigger is paused or not." }, "quartz_cron_expression": { - "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" + "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." }, "timezone_id": { - "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n" + "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required." } } }, @@ -3289,7 +3367,7 @@ "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference." }, "op": { - "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.\n" + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison." }, "right": { "description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference." @@ -3312,13 +3390,13 @@ "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." }, "project_directory": { - "description": "Path to the project directory. Optional for Git sourced tasks, in which case if no value is provided, the root of the Git repository is used." + "description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used." }, "schema": { "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the project directory. 
When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3326,7 +3404,7 @@ } }, "depends_on": { - "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n", + "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.", "items": { "description": "", "properties": { @@ -3342,9 +3420,15 @@ "description": { "description": "An optional description for this task." }, + "disable_auto_optimization": { + "description": "An option to disable auto optimization in serverless" + }, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." + }, "on_duration_warning_threshold_exceeded": { "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", "items": { @@ -3372,9 +3456,11 @@ } }, "existing_cluster_id": { - "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. Only all-purpose clusters are supported. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. 
We suggest running jobs and tasks on new clusters for\ngreater reliability" + }, + "for_each_task": { + "description": "" }, - "for_each_task": null, "health": { "description": "", "properties": { @@ -3401,7 +3487,7 @@ "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." }, "libraries": { - "description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.", + "description": "An optional list of libraries to be installed on the cluster.\nThe default value is an empty list.", "items": { "description": "", "properties": { @@ -3463,7 +3549,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { "description": "" @@ -3793,16 +3879,16 @@ "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", "properties": { "base_parameters": { - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run\nNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.", "additionalproperties": { "description": "" } }, "notebook_path": { - "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. 
This field is required.\n" + "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, @@ -3824,7 +3910,7 @@ "description": "If pipeline_task, indicates that this task must execute a Pipeline.", "properties": { "full_refresh": { - "description": "If true, a full refresh will be triggered on the delta live table." + "description": "If true, triggers a full refresh on the delta live table." }, "pipeline_id": { "description": "The full name of the pipeline task to execute." @@ -3855,14 +3941,26 @@ } }, "retry_on_timeout": { - "description": "An optional policy to specify whether to retry a task when it times out." + "description": "An optional policy to specify whether to retry a job when it times out. The default behavior\nis to not retry on timeout." 
}, "run_if": { - "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed\n" + "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed" }, "run_job_task": { "description": "If run_job_task, indicates that this task must execute another job.", "properties": { + "dbt_commands": { + "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`", + "items": { + "description": "" + } + }, + "jar_params": { + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.", + "items": { + "description": "" + } + }, "job_id": { "description": "ID of the job to trigger." }, @@ -3871,6 +3969,44 @@ "additionalproperties": { "description": "" } + }, + "notebook_params": { + "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", + "additionalproperties": { + "description": "" + } + }, + "pipeline_params": { + "description": "", + "properties": { + "full_refresh": { + "description": "If true, triggers a full refresh on the delta live table." 
+ } + } + }, + "python_named_params": { + "description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.", + "additionalproperties": { + "description": "" + } + }, + "python_params": { + "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "items": { + "description": "" + } + }, + "spark_submit_params": { + "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "items": { + "description": "" + } + }, + "sql_params": { + "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.", + "additionalproperties": { + "description": "" + } } } }, @@ -3878,13 +4014,13 @@ "description": "If spark_jar_task, indicates that this task must run a JAR.", "properties": { "jar_uri": { - "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n" + "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create." }, "main_class_name": { "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." 
}, "parameters": { - "description": "Parameters passed to the main method.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", + "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -3895,7 +4031,7 @@ "description": "If spark_python_task, indicates that this task must run a Python file.", "properties": { "parameters": { - "description": "Command line parameters passed to the Python file.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -3904,15 +4040,15 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, "spark_submit_task": { - "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n", + "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. 
Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", "properties": { "parameters": { - "description": "Command-line parameters passed to spark submit.\n\nUse [task parameter variables](https://docs.databricks.com/workflows/jobs/parameter-value-references.html) such as `{{job.id}}` to pass context about job runs.\n", + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -3982,7 +4118,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Project is located in cloud Git provider.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, @@ -4012,7 +4148,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -4073,13 +4209,13 @@ "description": "File arrival trigger settings.", "properties": { "min_time_between_triggers_seconds": { - "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n" + "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. 
The minimum allowed value is 60 seconds" }, "url": { - "description": "The storage location to monitor for file arrivals. The value must point to the root or a subpath of an external location URL or the root or subpath of a Unity Catalog volume." + "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location." }, "wait_after_last_change_seconds": { - "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n" + "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds." } } }, @@ -4087,13 +4223,13 @@ "description": "Whether this trigger is paused or not." }, "table": { - "description": "Table trigger settings.", + "description": "", "properties": { "condition": { "description": "The table(s) condition based on which to trigger a job run." }, "min_time_between_triggers_seconds": { - "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds.\n" + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds." }, "table_names": { "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", @@ -4102,14 +4238,34 @@ } }, "wait_after_last_change_seconds": { - "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds.\n" + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds." + } + } + }, + "table_update": { + "description": "", + "properties": { + "condition": { + "description": "The table(s) condition based on which to trigger a job run." + }, + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds." + }, + "table_names": { + "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "items": { + "description": "" + } + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds." } } } } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. 
The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -4213,16 +4369,8 @@ } } }, - "anthropic_config": { - "description": "Anthropic Config. Only required if the provider is 'anthropic'.", - "properties": { - "anthropic_api_key": { - "description": "The Databricks secret key reference for an Anthropic API key." - } - } - }, - "aws_bedrock_config": { - "description": "AWS Bedrock Config. Only required if the provider is 'aws-bedrock'.", + "amazon_bedrock_config": { + "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", "properties": { "aws_access_key_id": { "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." @@ -4234,7 +4382,15 @@ "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." }, "bedrock_provider": { - "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + } + } + }, + "anthropic_config": { + "description": "Anthropic Config. Only required if the provider is 'anthropic'.", + "properties": { + "anthropic_api_key": { + "description": "The Databricks secret key reference for an Anthropic API key." } } }, @@ -4292,7 +4448,7 @@ } }, "provider": { - "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" }, "task": { "description": "The task type of the external model." 
diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index 69ef639ae..ae3710ac8 100644 --- a/cmd/bundle/generate/generate_test.go +++ b/cmd/bundle/generate/generate_test.go @@ -133,12 +133,12 @@ func TestGenerateJobCommand(t *testing.T) { Settings: &jobs.JobSettings{ Name: "test-job", JobClusters: []jobs.JobCluster{ - {NewCluster: &compute.ClusterSpec{ + {NewCluster: compute.ClusterSpec{ CustomTags: map[string]string{ "Tag1": "24X7-1234", }, }}, - {NewCluster: &compute.ClusterSpec{ + {NewCluster: compute.ClusterSpec{ SparkConf: map[string]string{ "spark.databricks.delta.preview.enabled": "true", }, diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index c335d4caa..2e8cc2cd4 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -65,7 +65,7 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.GitUsername, "git-username", createReq.GitUsername, `Git username.`) + cmd.Flags().StringVar(&createReq.GitUsername, "git-username", createReq.GitUsername, `The username or email provided with your Git provider account, depending on which provider you are using.`) cmd.Flags().StringVar(&createReq.PersonalAccessToken, "personal-access-token", createReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`) cmd.Use = "create GIT_PROVIDER" @@ -335,7 +335,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.GitProvider, "git-provider", updateReq.GitProvider, `Git provider.`) - cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `Git username.`) + cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `The username or email provided with your Git provider account, depending on which provider you are using.`) cmd.Flags().StringVar(&updateReq.PersonalAccessToken, "personal-access-token", updateReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`) cmd.Use = "update CREDENTIAL_ID" diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index ec8be99f6..ec5958b5b 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -243,13 +243,13 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *settings.GetIpAccessList, + *settings.GetIpAccessListRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetIpAccessList + var getReq settings.GetIpAccessListRequest // TODO: short flags diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 17bef3aaa..267dfc73b 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -436,7 +436,7 @@ func newDeleteRun() *cobra.Command { Deletes a non-active run. Returns an error if the run is active. 
Arguments: - RUN_ID: The canonical identifier of the run for which to retrieve the metadata.` + RUN_ID: ID of the run to delete.` cmd.Annotations = make(map[string]string) @@ -470,14 +470,14 @@ func newDeleteRun() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "The canonical identifier of the run for which to retrieve the metadata") + id, err := cmdio.Select(ctx, names, "ID of the run to delete") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have the canonical identifier of the run for which to retrieve the metadata") + return fmt.Errorf("expected to have id of the run to delete") } _, err = fmt.Sscan(args[0], &deleteRunReq.RunId) if err != nil { @@ -908,7 +908,7 @@ func newGetRunOutput() *cobra.Command { 60 days, you must save old run results before they expire. Arguments: - RUN_ID: The canonical identifier for the run. This field is required.` + RUN_ID: The canonical identifier for the run.` cmd.Annotations = make(map[string]string) @@ -1038,8 +1038,8 @@ func newListRuns() *cobra.Command { cmd.Flags().IntVar(&listRunsReq.Offset, "offset", listRunsReq.Offset, `The offset of the first run to return, relative to the most recent run.`) cmd.Flags().StringVar(&listRunsReq.PageToken, "page-token", listRunsReq.PageToken, `Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of runs respectively.`) cmd.Flags().Var(&listRunsReq.RunType, "run-type", `The type of runs to return. Supported values: [JOB_RUN, SUBMIT_RUN, WORKFLOW_RUN]`) - cmd.Flags().IntVar(&listRunsReq.StartTimeFrom, "start-time-from", listRunsReq.StartTimeFrom, `Show runs that started _at or after_ this value.`) - cmd.Flags().IntVar(&listRunsReq.StartTimeTo, "start-time-to", listRunsReq.StartTimeTo, `Show runs that started _at or before_ this value.`) + cmd.Flags().Int64Var(&listRunsReq.StartTimeFrom, "start-time-from", listRunsReq.StartTimeFrom, `Show runs that started _at or after_ this value.`) + cmd.Flags().Int64Var(&listRunsReq.StartTimeTo, "start-time-to", listRunsReq.StartTimeTo, `Show runs that started _at or before_ this value.`) cmd.Use = "list-runs" cmd.Short = `List job runs.` @@ -1502,13 +1502,23 @@ func newSubmit() *cobra.Command { cmd.Flags().Var(&submitJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: access_control_list + // TODO: complex arg: condition_task + // TODO: complex arg: dbt_task // TODO: complex arg: email_notifications // TODO: complex arg: git_source // TODO: complex arg: health cmd.Flags().StringVar(&submitReq.IdempotencyToken, "idempotency-token", submitReq.IdempotencyToken, `An optional token that can be used to guarantee the idempotency of job run requests.`) + // TODO: complex arg: notebook_task // TODO: complex arg: notification_settings + // TODO: complex arg: pipeline_task + // TODO: complex arg: python_wheel_task // TODO: complex arg: queue + // TODO: complex arg: run_job_task cmd.Flags().StringVar(&submitReq.RunName, "run-name", submitReq.RunName, `An optional name for the run.`) + // TODO: complex arg: spark_jar_task + // TODO: complex arg: spark_python_task + // TODO: complex arg: spark_submit_task + // TODO: complex arg: sql_task // TODO: array: tasks cmd.Flags().IntVar(&submitReq.TimeoutSeconds, "timeout-seconds", submitReq.TimeoutSeconds, `An optional timeout applied to 
each run of this job.`) // TODO: complex arg: webhook_notifications diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 8481a6a8c..b0136de20 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -33,8 +33,10 @@ func New() *cobra.Command { cmd.AddCommand(newCreate()) cmd.AddCommand(newGet()) cmd.AddCommand(newGetPublished()) + cmd.AddCommand(newMigrate()) cmd.AddCommand(newPublish()) cmd.AddCommand(newTrash()) + cmd.AddCommand(newUnpublish()) cmd.AddCommand(newUpdate()) // Apply optional overrides to this command. @@ -240,6 +242,87 @@ func newGetPublished() *cobra.Command { return cmd } +// start migrate command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var migrateOverrides []func( + *cobra.Command, + *dashboards.MigrateDashboardRequest, +) + +func newMigrate() *cobra.Command { + cmd := &cobra.Command{} + + var migrateReq dashboards.MigrateDashboardRequest + var migrateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&migrateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&migrateReq.DisplayName, "display-name", migrateReq.DisplayName, `Display name for the new Lakeview dashboard.`) + cmd.Flags().StringVar(&migrateReq.ParentPath, "parent-path", migrateReq.ParentPath, `The workspace path of the folder to contain the migrated Lakeview dashboard.`) + + cmd.Use = "migrate SOURCE_DASHBOARD_ID" + cmd.Short = `Migrate dashboard.` + cmd.Long = `Migrate dashboard. + + Migrates a classic SQL dashboard to Lakeview. + + Arguments: + SOURCE_DASHBOARD_ID: UUID of the dashboard to be migrated.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'source_dashboard_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = migrateJson.Unmarshal(&migrateReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + migrateReq.SourceDashboardId = args[0] + } + + response, err := w.Lakeview.Migrate(ctx, migrateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range migrateOverrides { + fn(cmd, &migrateReq) + } + + return cmd +} + // start publish command // Slice with functions to override default command behavior. @@ -367,6 +450,67 @@ func newTrash() *cobra.Command { return cmd } +// start unpublish command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var unpublishOverrides []func( + *cobra.Command, + *dashboards.UnpublishDashboardRequest, +) + +func newUnpublish() *cobra.Command { + cmd := &cobra.Command{} + + var unpublishReq dashboards.UnpublishDashboardRequest + + // TODO: short flags + + cmd.Use = "unpublish DASHBOARD_ID" + cmd.Short = `Unpublish dashboard.` + cmd.Long = `Unpublish dashboard. + + Unpublish the dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to be published.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + unpublishReq.DashboardId = args[0] + + err = w.Lakeview.Unpublish(ctx, unpublishReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range unpublishOverrides { + fn(cmd, &unpublishReq) + } + + return cmd +} + // start update command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index 38a3bf9c0..57a7d1e5e 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -64,6 +64,9 @@ func New() *cobra.Command { For the mapping of the required permissions for specific actions or abilities and other important information, see [Access Control]. + Note that to manage access control on service principals, use **[Account + Access Control Proxy](:service:accountaccesscontrolproxy)**. + [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html`, GroupID: "iam", Annotations: map[string]string{ @@ -112,7 +115,7 @@ func newGet() *cobra.Command { REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: authorization, clusters, cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or sql-warehouses. + repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) @@ -240,7 +243,7 @@ func newSet() *cobra.Command { REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: authorization, clusters, cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or sql-warehouses. + repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) @@ -314,7 +317,7 @@ func newUpdate() *cobra.Command { REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: authorization, clusters, cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or sql-warehouses. + repos, serving-endpoints, or warehouses. 
REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) diff --git a/go.mod b/go.mod index 88fb8faeb..931252baa 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.36.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.37.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index fc978c841..048e8e02e 100644 --- a/go.sum +++ b/go.sum @@ -30,8 +30,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.36.0 h1:QOO9VxBh6JmzzPpCHh0h1f4Ijk+Y3mqBtNN1nzp2Nq8= -github.com/databricks/databricks-sdk-go v0.36.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= +github.com/databricks/databricks-sdk-go v0.37.0 h1:8ej3hNqfyfDNdV5YBjfLbq+p99JLu5NTtzwObbsIhRM= +github.com/databricks/databricks-sdk-go v0.37.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From a95b1c7dcfb00918c2aa6eb6e6dfa9f60ed58b11 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Apr 2024 12:40:29 +0200 Subject: [PATCH 113/286] Retain location information of variable reference (#1333) ## Changes Variable substitution works as if the variable reference is literally replaced with its contents. The following fields should be interpreted in the same way regardless of where the variable is defined: ```yaml foo: ${var.some_path} bar: "./${var.some_path}" ``` Before this change, `foo` would inherit the location information of the variable definition. After this change, it uses the location information of the variable reference, making the behavior for `foo` and `bar` identical. Fixes #1330. ## Tests The new test passes only with the fix. 
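For illustration of the change described above, here is a minimal, self-contained sketch of the idea. It does not use the actual `libs/dyn` API; the `Location`, `Value`, and `substitutePure` names are hypothetical stand-ins. The point is only that a pure variable reference is re-wrapped with the location of the reference rather than the location of the definition, so relative path translation treats `foo` and `bar` above identically.

```go
package main

import "fmt"

// Location records where a value appears in a configuration file.
type Location struct {
	File string
	Line int
}

// Value pairs a configuration value with an associated location.
type Value struct {
	V   any
	Loc Location
}

// substitutePure models resolving a field that is a pure variable reference,
// such as `python_file: ${var.file_path}`. The definition carries the location
// of the variable's default value; ref is the location where it is referenced.
func substitutePure(definition Value, ref Location) Value {
	// Re-wrap the resolved value with the location of the reference so that
	// downstream steps (for example relative path translation) resolve paths
	// relative to where the variable is used, not where it is defined.
	return Value{V: definition.V, Loc: ref}
}

func main() {
	def := Value{
		V:   "../src/file1.py",
		Loc: Location{File: "databricks.yml", Line: 10},
	}
	use := Location{File: "resources/job.yml", Line: 14}

	resolved := substitutePure(def, use)
	fmt.Printf("value=%q location=%s:%d\n", resolved.V, resolved.Loc.File, resolved.Loc.Line)
	// Prints: value="../src/file1.py" location=resources/job.yml:14
}
```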
--- .../relative_path_translation/databricks.yml | 33 ++++++++++++ .../resources/job.yml | 14 +++++ .../relative_path_translation/src/file1.py | 0 .../relative_path_translation/src/file2.py | 0 .../tests/relative_path_translation_test.go | 53 +++++++++++++++++++ libs/dyn/dynvar/resolve.go | 7 ++- 6 files changed, 106 insertions(+), 1 deletion(-) create mode 100644 bundle/tests/relative_path_translation/databricks.yml create mode 100644 bundle/tests/relative_path_translation/resources/job.yml create mode 100644 bundle/tests/relative_path_translation/src/file1.py create mode 100644 bundle/tests/relative_path_translation/src/file2.py create mode 100644 bundle/tests/relative_path_translation_test.go diff --git a/bundle/tests/relative_path_translation/databricks.yml b/bundle/tests/relative_path_translation/databricks.yml new file mode 100644 index 000000000..651ff267c --- /dev/null +++ b/bundle/tests/relative_path_translation/databricks.yml @@ -0,0 +1,33 @@ +bundle: + name: relative_path_translation + +include: + - resources/*.yml + +variables: + file_path: + # This path is expected to be resolved relative to where it is used. + default: ../src/file1.py + +workspace: + file_path: /remote + +targets: + default: + default: true + + override: + variables: + file_path: ./src/file2.py + + resources: + jobs: + job: + tasks: + - task_key: local + spark_python_task: + python_file: ./src/file2.py + + - task_key: variable_reference + spark_python_task: + python_file: ${var.file_path} diff --git a/bundle/tests/relative_path_translation/resources/job.yml b/bundle/tests/relative_path_translation/resources/job.yml new file mode 100644 index 000000000..93f121f25 --- /dev/null +++ b/bundle/tests/relative_path_translation/resources/job.yml @@ -0,0 +1,14 @@ +resources: + jobs: + job: + tasks: + - task_key: local + spark_python_task: + python_file: ../src/file1.py + + - task_key: variable_reference + spark_python_task: + # Note: this is a pure variable reference yet needs to persist the location + # of the reference, not the location of the variable value. + # Also see https://github.com/databricks/cli/issues/1330. 
+ python_file: ${var.file_path} diff --git a/bundle/tests/relative_path_translation/src/file1.py b/bundle/tests/relative_path_translation/src/file1.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/tests/relative_path_translation/src/file2.py b/bundle/tests/relative_path_translation/src/file2.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/tests/relative_path_translation_test.go b/bundle/tests/relative_path_translation_test.go new file mode 100644 index 000000000..d5b80bea5 --- /dev/null +++ b/bundle/tests/relative_path_translation_test.go @@ -0,0 +1,53 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func configureMock(t *testing.T, b *bundle.Bundle) { + // Configure mock workspace client + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &config.Config{ + Host: "https://mock.databricks.workspace.com", + } + m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "user@domain.com", + }, nil) + b.SetWorkpaceClient(m.WorkspaceClient) +} + +func TestRelativePathTranslationDefault(t *testing.T) { + b := loadTarget(t, "./relative_path_translation", "default") + configureMock(t, b) + + diags := bundle.Apply(context.Background(), b, phases.Initialize()) + require.NoError(t, diags.Error()) + + t0 := b.Config.Resources.Jobs["job"].Tasks[0] + assert.Equal(t, "/remote/src/file1.py", t0.SparkPythonTask.PythonFile) + t1 := b.Config.Resources.Jobs["job"].Tasks[1] + assert.Equal(t, "/remote/src/file1.py", t1.SparkPythonTask.PythonFile) +} + +func TestRelativePathTranslationOverride(t *testing.T) { + b := loadTarget(t, "./relative_path_translation", "override") + configureMock(t, b) + + diags := bundle.Apply(context.Background(), b, phases.Initialize()) + require.NoError(t, diags.Error()) + + t0 := b.Config.Resources.Jobs["job"].Tasks[0] + assert.Equal(t, "/remote/src/file2.py", t0.SparkPythonTask.PythonFile) + t1 := b.Config.Resources.Jobs["job"].Tasks[1] + assert.Equal(t, "/remote/src/file2.py", t1.SparkPythonTask.PythonFile) +} diff --git a/libs/dyn/dynvar/resolve.go b/libs/dyn/dynvar/resolve.go index b8a0aef62..d2494bc21 100644 --- a/libs/dyn/dynvar/resolve.go +++ b/libs/dyn/dynvar/resolve.go @@ -150,7 +150,12 @@ func (r *resolver) resolveRef(ref ref, seen []string) (dyn.Value, error) { if ref.isPure() && complete { // If the variable reference is pure, we can substitute it. // This is useful for interpolating values of non-string types. - return resolved[0], nil + // + // Note: we use the location of the variable reference to preserve the information + // of where it is used. This also means that relative path resolution is done + // relative to where a variable is used, not where it is defined. + // + return dyn.NewValue(resolved[0].Value(), ref.value.Location()), nil } // Not pure; perform string interpolation. From b4e264594215a23aba2d5f66cdc79b623140509f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Apr 2024 13:14:23 +0200 Subject: [PATCH 114/286] Make normalization return warnings instead of errors (#1334) ## Changes Errors in normalization mean hard failure as of #1319. 
We currently allow malformed configurations and ignore the malformed fields and should continue to do so. ## Tests * Tests pass. * No calls to `diag.Errorf` from `libs/dyn` --- libs/dyn/convert/normalize.go | 6 +++--- libs/dyn/convert/normalize_test.go | 26 +++++++++++++------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 7d6bde051..296e2abb2 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -72,7 +72,7 @@ func nullWarning(expected dyn.Kind, src dyn.Value, path dyn.Path) diag.Diagnosti func typeMismatch(expected dyn.Kind, src dyn.Value, path dyn.Path) diag.Diagnostic { return diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: fmt.Sprintf("expected %s, found %s", expected, src.Kind()), Location: src.Location(), Path: path, @@ -300,7 +300,7 @@ func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value, path dyn } return dyn.InvalidValue, diags.Append(diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: fmt.Sprintf("cannot parse %q as an integer", src.MustString()), Location: src.Location(), Path: path, @@ -333,7 +333,7 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path d } return dyn.InvalidValue, diags.Append(diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: fmt.Sprintf("cannot parse %q as a floating point number", src.MustString()), Location: src.Location(), Path: path, diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 856797968..133eaef8f 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -40,7 +40,7 @@ func TestNormalizeStructElementDiagnostic(t *testing.T) { vout, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected string, found map`, Location: dyn.Location{}, Path: dyn.NewPath(dyn.Key("bar")), @@ -100,7 +100,7 @@ func TestNormalizeStructError(t *testing.T) { _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected map, found string`, Location: vin.Get("foo").Location(), Path: dyn.EmptyPath, @@ -245,7 +245,7 @@ func TestNormalizeMapElementDiagnostic(t *testing.T) { vout, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected string, found map`, Location: dyn.Location{}, Path: dyn.NewPath(dyn.Key("bar")), @@ -271,7 +271,7 @@ func TestNormalizeMapError(t *testing.T) { _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected map, found string`, Location: vin.Location(), Path: dyn.EmptyPath, @@ -335,7 +335,7 @@ func TestNormalizeSliceElementDiagnostic(t *testing.T) { vout, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected string, found map`, Location: dyn.Location{}, Path: dyn.NewPath(dyn.Index(2)), @@ -359,7 +359,7 @@ func TestNormalizeSliceError(t *testing.T) { _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected sequence, found string`, Location: vin.Location(), Path: dyn.EmptyPath, @@ 
-451,7 +451,7 @@ func TestNormalizeStringError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected string, found map`, Location: dyn.Location{}, Path: dyn.EmptyPath, @@ -514,7 +514,7 @@ func TestNormalizeBoolFromStringError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected bool, found string`, Location: vin.Location(), Path: dyn.EmptyPath, @@ -527,7 +527,7 @@ func TestNormalizeBoolError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected bool, found map`, Location: dyn.Location{}, Path: dyn.EmptyPath, @@ -577,7 +577,7 @@ func TestNormalizeIntFromStringError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `cannot parse "abc" as an integer`, Location: vin.Location(), Path: dyn.EmptyPath, @@ -590,7 +590,7 @@ func TestNormalizeIntError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected int, found map`, Location: dyn.Location{}, Path: dyn.EmptyPath, @@ -640,7 +640,7 @@ func TestNormalizeFloatFromStringError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `cannot parse "abc" as a floating point number`, Location: vin.Location(), Path: dyn.EmptyPath, @@ -653,7 +653,7 @@ func TestNormalizeFloatError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected float, found map`, Location: dyn.Location{}, Path: dyn.EmptyPath, From 04cbc7171ee68ea98590fa0e1fc369a530ed7108 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Apr 2024 17:33:43 +0200 Subject: [PATCH 115/286] Make bundle validation print text output by default (#1335) ## Changes It now shows human-readable warnings and validation status. ## Tests * Manual tests against many examples. * Errors still return immediately. --- cmd/bundle/validate.go | 136 ++++++++++++++++++++++++++++++++++++---- libs/diag/diagnostic.go | 11 ++++ 2 files changed, 134 insertions(+), 13 deletions(-) diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index e625539b4..4a04db409 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -2,15 +2,129 @@ package bundle import ( "encoding/json" + "fmt" + "path/filepath" + "strings" + "text/template" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/flags" + "github.com/fatih/color" "github.com/spf13/cobra" ) +var validateFuncMap = template.FuncMap{ + "red": color.RedString, + "green": color.GreenString, + "blue": color.BlueString, + "yellow": color.YellowString, + "magenta": color.MagentaString, + "cyan": color.CyanString, + "bold": func(format string, a ...interface{}) string { + return color.New(color.Bold).Sprintf(format, a...) 
+ }, + "italic": func(format string, a ...interface{}) string { + return color.New(color.Italic).Sprintf(format, a...) + }, +} + +const errorTemplate = `{{ "Error" | red }}: {{ .Summary }} + {{ "at " }}{{ .Path.String | green }} + {{ "in " }}{{ .Location.String | cyan }} + +` + +const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} + {{ "at " }}{{ .Path.String | green }} + {{ "in " }}{{ .Location.String | cyan }} + +` + +const summaryTemplate = `Name: {{ .Config.Bundle.Name | bold }} +Target: {{ .Config.Bundle.Target | bold }} +Workspace: + Host: {{ .Config.Workspace.Host | bold }} + User: {{ .Config.Workspace.CurrentUser.UserName | bold }} + Path: {{ .Config.Workspace.RootPath | bold }} + +{{ .Trailer }} +` + +func pluralize(n int, singular, plural string) string { + if n == 1 { + return fmt.Sprintf("%d %s", n, singular) + } + return fmt.Sprintf("%d %s", n, plural) +} + +func buildTrailer(diags diag.Diagnostics) string { + parts := []string{} + if errors := len(diags.Filter(diag.Error)); errors > 0 { + parts = append(parts, color.RedString(pluralize(errors, "error", "errors"))) + } + if warnings := len(diags.Filter(diag.Warning)); warnings > 0 { + parts = append(parts, color.YellowString(pluralize(warnings, "warning", "warnings"))) + } + if len(parts) > 0 { + return fmt.Sprintf("Found %s", strings.Join(parts, " and ")) + } else { + return color.GreenString("Validation OK!") + } +} + +func renderTextOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { + errorT := template.Must(template.New("error").Funcs(validateFuncMap).Parse(errorTemplate)) + warningT := template.Must(template.New("warning").Funcs(validateFuncMap).Parse(warningTemplate)) + + // Print errors and warnings. + for _, d := range diags { + var t *template.Template + switch d.Severity { + case diag.Error: + t = errorT + case diag.Warning: + t = warningT + } + + // Make file relative to bundle root + if d.Location.File != "" { + out, _ := filepath.Rel(b.RootPath, d.Location.File) + d.Location.File = out + } + + // Render the diagnostic with the appropriate template. + err := t.Execute(cmd.OutOrStdout(), d) + if err != nil { + return err + } + } + + // Print validation summary. + t := template.Must(template.New("summary").Funcs(validateFuncMap).Parse(summaryTemplate)) + err := t.Execute(cmd.OutOrStdout(), map[string]any{ + "Config": b.Config, + "Trailer": buildTrailer(diags), + }) + if err != nil { + return err + } + + return diags.Error() +} + +func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { + buf, err := json.MarshalIndent(b.Config, "", " ") + if err != nil { + return err + } + cmd.OutOrStdout().Write(buf) + return diags.Error() +} + func newValidateCommand() *cobra.Command { cmd := &cobra.Command{ Use: "validate", @@ -25,23 +139,19 @@ func newValidateCommand() *cobra.Command { return diags.Error() } - diags = bundle.Apply(ctx, b, phases.Initialize()) + diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize())) if err := diags.Error(); err != nil { return err } - // Until we change up the output of this command to be a text representation, - // we'll just output all diagnostics as debug logs. 
- for _, diag := range diags { - log.Debugf(cmd.Context(), "[%s]: %s", diag.Location, diag.Summary) + switch root.OutputType(cmd) { + case flags.OutputText: + return renderTextOutput(cmd, b, diags) + case flags.OutputJSON: + return renderJsonOutput(cmd, b, diags) + default: + return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } - - buf, err := json.MarshalIndent(b.Config, "", " ") - if err != nil { - return err - } - cmd.OutOrStdout().Write(buf) - return nil } return cmd diff --git a/libs/diag/diagnostic.go b/libs/diag/diagnostic.go index ddb3af387..621527551 100644 --- a/libs/diag/diagnostic.go +++ b/libs/diag/diagnostic.go @@ -101,3 +101,14 @@ func (ds Diagnostics) Error() error { } return nil } + +// Filter returns a new list of diagnostics that match the specified severity. +func (ds Diagnostics) Filter(severity Severity) Diagnostics { + var out Diagnostics + for _, d := range ds { + if d.Severity == severity { + out = append(out, d) + } + } + return out +} From 5a7405e606600e3eb9a98cf1ca173e144ba3322a Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 3 Apr 2024 17:47:45 +0200 Subject: [PATCH 116/286] Fixed message for successful auth describe run (#1336) ## Changes Fixed message for successful auth describe run --- cmd/auth/describe.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/auth/describe.go b/cmd/auth/describe.go index 125b0731a..1f11df319 100644 --- a/cmd/auth/describe.go +++ b/cmd/auth/describe.go @@ -12,14 +12,14 @@ import ( "github.com/spf13/cobra" ) -var authTemplate = `{{"Host:" | bold}} {{.Details.Host}} -{{- if .AccountID}} -{{"Account ID:" | bold}} {{.AccountID}} +var authTemplate = `{{"Host:" | bold}} {{.Status.Details.Host}} +{{- if .Status.AccountID}} +{{"Account ID:" | bold}} {{.Status.AccountID}} {{- end}} -{{- if .Username}} -{{"User:" | bold}} {{.Username}} +{{- if .Status.Username}} +{{"User:" | bold}} {{.Status.Username}} {{- end}} -{{"Authenticated with:" | bold}} {{.Details.AuthType}} +{{"Authenticated with:" | bold}} {{.Status.Details.AuthType}} ----- ` + configurationTemplate From 6ac45e8bae4de5cca596da3485a612090145fcda Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 3 Apr 2024 19:07:23 +0200 Subject: [PATCH 117/286] Release v0.217.0 (#1337) Breaking Change: * Add allow list for resources when bundle `run_as` is set ([#1233](https://github.com/databricks/cli/pull/1233)). * Make bundle validation print text output by default ([#1335](https://github.com/databricks/cli/pull/1335)). CLI: * Added `auth describe` command ([#1244](https://github.com/databricks/cli/pull/1244)). * Fixed message for successful auth describe run ([#1336](https://github.com/databricks/cli/pull/1336)). Bundles: * Use UserName field to identify if service principal is used ([#1310](https://github.com/databricks/cli/pull/1310)). * Allow unknown properties in the config file for template initialization ([#1315](https://github.com/databricks/cli/pull/1315)). * Remove support for DATABRICKS_BUNDLE_INCLUDES ([#1317](https://github.com/databricks/cli/pull/1317)). * Make `bundle.deployment` optional in the bundle schema ([#1321](https://github.com/databricks/cli/pull/1321)). * Fix the generated DABs JSON schema ([#1322](https://github.com/databricks/cli/pull/1322)). * Make bundle loaders return diagnostics ([#1319](https://github.com/databricks/cli/pull/1319)). * Add `bundle debug terraform` command ([#1294](https://github.com/databricks/cli/pull/1294)). 
* Allow specifying CLI version constraints required to run the bundle ([#1320](https://github.com/databricks/cli/pull/1320)). Internal: * Retain location information of variable reference ([#1333](https://github.com/databricks/cli/pull/1333)). * Define `dyn.Mapping` to represent maps ([#1301](https://github.com/databricks/cli/pull/1301)). * Return `diag.Diagnostics` from mutators ([#1305](https://github.com/databricks/cli/pull/1305)). * Fix flaky test in `libs/process` ([#1314](https://github.com/databricks/cli/pull/1314)). * Move path field to bundle type ([#1316](https://github.com/databricks/cli/pull/1316)). * Load bundle configuration from mutator ([#1318](https://github.com/databricks/cli/pull/1318)). * Return diagnostics from `config.Load` ([#1324](https://github.com/databricks/cli/pull/1324)). * Return warning for nil primitive types during normalization ([#1329](https://github.com/databricks/cli/pull/1329)). * Include `dyn.Path` in normalization warnings and errors ([#1332](https://github.com/databricks/cli/pull/1332)). * Make normalization return warnings instead of errors ([#1334](https://github.com/databricks/cli/pull/1334)). API Changes: * Added `databricks lakeview migrate` command. * Added `databricks lakeview unpublish` command. * Changed `databricks ip-access-lists get` command . New request type is . OpenAPI commit e316cc3d78d087522a74650e26586088da9ac8cb (2024-04-03) Dependency updates: * Bump github.com/databricks/databricks-sdk-go from 0.36.0 to 0.37.0 ([#1326](https://github.com/databricks/cli/pull/1326)). --- CHANGELOG.md | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52d7590f9..c2a507290 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,46 @@ # Version changelog +## 0.217.0 + +Breaking Change: + * Add allow list for resources when bundle `run_as` is set ([#1233](https://github.com/databricks/cli/pull/1233)). + * Make bundle validation print text output by default ([#1335](https://github.com/databricks/cli/pull/1335)). + +CLI: + * Added `auth describe` command ([#1244](https://github.com/databricks/cli/pull/1244)). + * Fixed message for successful auth describe run ([#1336](https://github.com/databricks/cli/pull/1336)). + +Bundles: + * Use UserName field to identify if service principal is used ([#1310](https://github.com/databricks/cli/pull/1310)). + * Allow unknown properties in the config file for template initialization ([#1315](https://github.com/databricks/cli/pull/1315)). + * Remove support for DATABRICKS_BUNDLE_INCLUDES ([#1317](https://github.com/databricks/cli/pull/1317)). + * Make `bundle.deployment` optional in the bundle schema ([#1321](https://github.com/databricks/cli/pull/1321)). + * Fix the generated DABs JSON schema ([#1322](https://github.com/databricks/cli/pull/1322)). + * Make bundle loaders return diagnostics ([#1319](https://github.com/databricks/cli/pull/1319)). + * Add `bundle debug terraform` command ([#1294](https://github.com/databricks/cli/pull/1294)). + * Allow specifying CLI version constraints required to run the bundle ([#1320](https://github.com/databricks/cli/pull/1320)). + +Internal: + * Retain location information of variable reference ([#1333](https://github.com/databricks/cli/pull/1333)). + * Define `dyn.Mapping` to represent maps ([#1301](https://github.com/databricks/cli/pull/1301)). + * Return `diag.Diagnostics` from mutators ([#1305](https://github.com/databricks/cli/pull/1305)). 
+ * Fix flaky test in `libs/process` ([#1314](https://github.com/databricks/cli/pull/1314)). + * Move path field to bundle type ([#1316](https://github.com/databricks/cli/pull/1316)). + * Load bundle configuration from mutator ([#1318](https://github.com/databricks/cli/pull/1318)). + * Return diagnostics from `config.Load` ([#1324](https://github.com/databricks/cli/pull/1324)). + * Return warning for nil primitive types during normalization ([#1329](https://github.com/databricks/cli/pull/1329)). + * Include `dyn.Path` in normalization warnings and errors ([#1332](https://github.com/databricks/cli/pull/1332)). + * Make normalization return warnings instead of errors ([#1334](https://github.com/databricks/cli/pull/1334)). + +API Changes: + * Added `databricks lakeview migrate` command. + * Added `databricks lakeview unpublish` command. + * Changed `databricks ip-access-lists get` command . New request type is . + +OpenAPI commit e316cc3d78d087522a74650e26586088da9ac8cb (2024-04-03) +Dependency updates: + * Bump github.com/databricks/databricks-sdk-go from 0.36.0 to 0.37.0 ([#1326](https://github.com/databricks/cli/pull/1326)). + ## 0.216.0 CLI: From 338fe1fe62350f3c915761c80ea8ed7f624717f6 Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Fri, 5 Apr 2024 12:19:54 +0200 Subject: [PATCH 118/286] Don't attempt auth in `auth profiles --skip-validate` (#1282) This makes the command almost instant, no matter how many profiles cfg file has. One downside is that we don't set AuthType for profiles that don't have it defined. We can technically infer AuthType based on ConfigAttributes tags, but their names are different from the names of actual auth providers (and some tags cover multiple providers at the same time). --- cmd/auth/profiles.go | 9 +-------- cmd/auth/profiles_test.go | 3 ++- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 7c4a7ab2f..5ebea4440 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -3,7 +3,6 @@ package auth import ( "context" "fmt" - "net/http" "os" "sync" "time" @@ -45,13 +44,7 @@ func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { } if skipValidate { - err := cfg.Authenticate(&http.Request{ - Header: make(http.Header), - }) - if err != nil { - return - } - c.Host = cfg.Host + c.Host = cfg.CanonicalHostName() c.AuthType = cfg.AuthType return } diff --git a/cmd/auth/profiles_test.go b/cmd/auth/profiles_test.go index c1971705f..8a667a6db 100644 --- a/cmd/auth/profiles_test.go +++ b/cmd/auth/profiles_test.go @@ -21,8 +21,9 @@ func TestProfiles(t *testing.T) { err := databrickscfg.SaveToProfile(ctx, &config.Config{ ConfigFile: configFile, Profile: "profile1", - Host: "https://abc.cloud.databricks.com", + Host: "abc.cloud.databricks.com", Token: "token1", + AuthType: "pat", }) require.NoError(t, err) From 7d1bab7cf0171041482486b2206560c0d1a6a5d7 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 5 Apr 2024 20:19:04 +0530 Subject: [PATCH 119/286] Bump internal terraform provider version to `1.39` (#1339) --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../data_source_aws_crossaccount_policy.go | 11 +- bundle/internal/tf/schema/data_source_job.go | 46 ++- bundle/internal/tf/schema/resource_grants.go | 2 + bundle/internal/tf/schema/resource_job.go | 46 ++- .../tf/schema/resource_lakehouse_monitor.go | 71 ++++ .../tf/schema/resource_model_serving.go | 65 ++++ .../tf/schema/resource_online_table.go | 26 ++ 
.../tf/schema/resource_registered_model.go | 1 + ...ource_restrict_workspace_admins_setting.go | 14 + .../tf/schema/resource_vector_search_index.go | 49 +++ bundle/internal/tf/schema/resources.go | 344 +++++++++--------- bundle/internal/tf/schema/root.go | 2 +- 13 files changed, 470 insertions(+), 209 deletions(-) create mode 100644 bundle/internal/tf/schema/resource_lakehouse_monitor.go create mode 100644 bundle/internal/tf/schema/resource_online_table.go create mode 100644 bundle/internal/tf/schema/resource_restrict_workspace_admins_setting.go create mode 100644 bundle/internal/tf/schema/resource_vector_search_index.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 363ad4e8a..7780510ea 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.38.0" +const ProviderVersion = "1.39.0" diff --git a/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go b/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go index 4886a9098..d639c82a8 100644 --- a/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go +++ b/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go @@ -3,7 +3,12 @@ package schema type DataSourceAwsCrossaccountPolicy struct { - Id string `json:"id,omitempty"` - Json string `json:"json,omitempty"` - PassRoles []string `json:"pass_roles,omitempty"` + AwsAccountId string `json:"aws_account_id,omitempty"` + Id string `json:"id,omitempty"` + Json string `json:"json,omitempty"` + PassRoles []string `json:"pass_roles,omitempty"` + PolicyType string `json:"policy_type,omitempty"` + Region string `json:"region,omitempty"` + SecurityGroupId string `json:"security_group_id,omitempty"` + VpcId string `json:"vpc_id,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index 6e67b285f..6ce02b0d1 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -472,9 +472,9 @@ type DataSourceJobJobSettingsSettingsSparkSubmitTask struct { } type DataSourceJobJobSettingsSettingsTaskConditionTask struct { - Left string `json:"left,omitempty"` - Op string `json:"op,omitempty"` - Right string `json:"right,omitempty"` + Left string `json:"left"` + Op string `json:"op"` + Right string `json:"right"` } type DataSourceJobJobSettingsSettingsTaskDbtTask struct { @@ -493,6 +493,7 @@ type DataSourceJobJobSettingsSettingsTaskDependsOn struct { } type DataSourceJobJobSettingsSettingsTaskEmailNotifications struct { + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` @@ -500,9 +501,9 @@ type DataSourceJobJobSettingsSettingsTaskEmailNotifications struct { } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskConditionTask struct { - Left string `json:"left,omitempty"` - Op string `json:"op,omitempty"` - Right string `json:"right,omitempty"` + Left string `json:"left"` + Op string `json:"op"` + Right string `json:"right"` } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDbtTask struct { @@ -521,6 +522,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDependsOn struct { } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications struct { + 
NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` @@ -806,19 +808,19 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTask struct { } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications struct { @@ -1143,19 +1145,19 @@ type DataSourceJobJobSettingsSettingsTaskSqlTask struct { } type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskWebhookNotifications struct { @@ -1202,25 +1204,33 @@ type DataSourceJobJobSettingsSettingsTriggerFileArrival struct { WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` } +type DataSourceJobJobSettingsSettingsTriggerTableUpdate struct { + Condition string `json:"condition,omitempty"` + MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` + TableNames []string `json:"table_names"` + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` +} + type DataSourceJobJobSettingsSettingsTrigger struct { PauseStatus string `json:"pause_status,omitempty"` FileArrival *DataSourceJobJobSettingsSettingsTriggerFileArrival `json:"file_arrival,omitempty"` + TableUpdate *DataSourceJobJobSettingsSettingsTriggerTableUpdate `json:"table_update,omitempty"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsWebhookNotifications struct { diff --git a/bundle/internal/tf/schema/resource_grants.go b/bundle/internal/tf/schema/resource_grants.go index 22861005f..dd00152fb 100644 --- a/bundle/internal/tf/schema/resource_grants.go +++ b/bundle/internal/tf/schema/resource_grants.go @@ -15,6 +15,8 @@ type ResourceGrants 
struct { Id string `json:"id,omitempty"` Metastore string `json:"metastore,omitempty"` Model string `json:"model,omitempty"` + Pipeline string `json:"pipeline,omitempty"` + Recipient string `json:"recipient,omitempty"` Schema string `json:"schema,omitempty"` Share string `json:"share,omitempty"` StorageCredential string `json:"storage_credential,omitempty"` diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index f8d08aefa..83e80c9c8 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -472,9 +472,9 @@ type ResourceJobSparkSubmitTask struct { } type ResourceJobTaskConditionTask struct { - Left string `json:"left,omitempty"` - Op string `json:"op,omitempty"` - Right string `json:"right,omitempty"` + Left string `json:"left"` + Op string `json:"op"` + Right string `json:"right"` } type ResourceJobTaskDbtTask struct { @@ -493,6 +493,7 @@ type ResourceJobTaskDependsOn struct { } type ResourceJobTaskEmailNotifications struct { + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` @@ -500,9 +501,9 @@ type ResourceJobTaskEmailNotifications struct { } type ResourceJobTaskForEachTaskTaskConditionTask struct { - Left string `json:"left,omitempty"` - Op string `json:"op,omitempty"` - Right string `json:"right,omitempty"` + Left string `json:"left"` + Op string `json:"op"` + Right string `json:"right"` } type ResourceJobTaskForEachTaskTaskDbtTask struct { @@ -521,6 +522,7 @@ type ResourceJobTaskForEachTaskTaskDependsOn struct { } type ResourceJobTaskForEachTaskTaskEmailNotifications struct { + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` @@ -806,19 +808,19 @@ type ResourceJobTaskForEachTaskTaskSqlTask struct { } type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { @@ -1143,19 +1145,19 @@ type ResourceJobTaskSqlTask struct { } type ResourceJobTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskWebhookNotifications struct { @@ -1202,25 +1204,33 @@ type ResourceJobTriggerFileArrival struct { WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` } 
+type ResourceJobTriggerTableUpdate struct { + Condition string `json:"condition,omitempty"` + MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` + TableNames []string `json:"table_names"` + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` +} + type ResourceJobTrigger struct { PauseStatus string `json:"pause_status,omitempty"` FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"` + TableUpdate *ResourceJobTriggerTableUpdate `json:"table_update,omitempty"` } type ResourceJobWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobWebhookNotifications struct { diff --git a/bundle/internal/tf/schema/resource_lakehouse_monitor.go b/bundle/internal/tf/schema/resource_lakehouse_monitor.go new file mode 100644 index 000000000..26196d2f5 --- /dev/null +++ b/bundle/internal/tf/schema/resource_lakehouse_monitor.go @@ -0,0 +1,71 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceLakehouseMonitorCustomMetrics struct { + Definition string `json:"definition,omitempty"` + InputColumns []string `json:"input_columns,omitempty"` + Name string `json:"name,omitempty"` + OutputDataType string `json:"output_data_type,omitempty"` + Type string `json:"type,omitempty"` +} + +type ResourceLakehouseMonitorDataClassificationConfig struct { + Enabled bool `json:"enabled,omitempty"` +} + +type ResourceLakehouseMonitorInferenceLog struct { + Granularities []string `json:"granularities,omitempty"` + LabelCol string `json:"label_col,omitempty"` + ModelIdCol string `json:"model_id_col,omitempty"` + PredictionCol string `json:"prediction_col,omitempty"` + PredictionProbaCol string `json:"prediction_proba_col,omitempty"` + ProblemType string `json:"problem_type,omitempty"` + TimestampCol string `json:"timestamp_col,omitempty"` +} + +type ResourceLakehouseMonitorNotificationsOnFailure struct { + EmailAddresses []string `json:"email_addresses,omitempty"` +} + +type ResourceLakehouseMonitorNotifications struct { + OnFailure *ResourceLakehouseMonitorNotificationsOnFailure `json:"on_failure,omitempty"` +} + +type ResourceLakehouseMonitorSchedule struct { + PauseStatus string `json:"pause_status,omitempty"` + QuartzCronExpression string `json:"quartz_cron_expression,omitempty"` + TimezoneId string `json:"timezone_id,omitempty"` +} + +type ResourceLakehouseMonitorSnapshot struct { +} + +type ResourceLakehouseMonitorTimeSeries struct { + Granularities []string `json:"granularities,omitempty"` + TimestampCol string `json:"timestamp_col,omitempty"` +} + +type ResourceLakehouseMonitor struct { + AssetsDir string `json:"assets_dir"` + BaselineTableName string `json:"baseline_table_name,omitempty"` + DashboardId string `json:"dashboard_id,omitempty"` + DriftMetricsTableName string `json:"drift_metrics_table_name,omitempty"` + Id string `json:"id,omitempty"` + LatestMonitorFailureMsg string `json:"latest_monitor_failure_msg,omitempty"` + MonitorVersion string `json:"monitor_version,omitempty"` + OutputSchemaName string `json:"output_schema_name"` + ProfileMetricsTableName string 
`json:"profile_metrics_table_name,omitempty"` + SkipBuiltinDashboard bool `json:"skip_builtin_dashboard,omitempty"` + SlicingExprs []string `json:"slicing_exprs,omitempty"` + Status string `json:"status,omitempty"` + TableName string `json:"table_name"` + WarehouseId string `json:"warehouse_id,omitempty"` + CustomMetrics []ResourceLakehouseMonitorCustomMetrics `json:"custom_metrics,omitempty"` + DataClassificationConfig *ResourceLakehouseMonitorDataClassificationConfig `json:"data_classification_config,omitempty"` + InferenceLog *ResourceLakehouseMonitorInferenceLog `json:"inference_log,omitempty"` + Notifications *ResourceLakehouseMonitorNotifications `json:"notifications,omitempty"` + Schedule *ResourceLakehouseMonitorSchedule `json:"schedule,omitempty"` + Snapshot *ResourceLakehouseMonitorSnapshot `json:"snapshot,omitempty"` + TimeSeries *ResourceLakehouseMonitorTimeSeries `json:"time_series,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_model_serving.go b/bundle/internal/tf/schema/resource_model_serving.go index 68265d9c0..a74a544ed 100644 --- a/bundle/internal/tf/schema/resource_model_serving.go +++ b/bundle/internal/tf/schema/resource_model_serving.go @@ -9,6 +9,70 @@ type ResourceModelServingConfigAutoCaptureConfig struct { TableNamePrefix string `json:"table_name_prefix,omitempty"` } +type ResourceModelServingConfigServedEntitiesExternalModelAi21LabsConfig struct { + Ai21LabsApiKey string `json:"ai21labs_api_key"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelAmazonBedrockConfig struct { + AwsAccessKeyId string `json:"aws_access_key_id"` + AwsRegion string `json:"aws_region"` + AwsSecretAccessKey string `json:"aws_secret_access_key"` + BedrockProvider string `json:"bedrock_provider"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelAnthropicConfig struct { + AnthropicApiKey string `json:"anthropic_api_key"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelCohereConfig struct { + CohereApiKey string `json:"cohere_api_key"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServingConfig struct { + DatabricksApiToken string `json:"databricks_api_token"` + DatabricksWorkspaceUrl string `json:"databricks_workspace_url"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig struct { + OpenaiApiBase string `json:"openai_api_base,omitempty"` + OpenaiApiKey string `json:"openai_api_key"` + OpenaiApiType string `json:"openai_api_type,omitempty"` + OpenaiApiVersion string `json:"openai_api_version,omitempty"` + OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"` + OpenaiOrganization string `json:"openai_organization,omitempty"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelPalmConfig struct { + PalmApiKey string `json:"palm_api_key"` +} + +type ResourceModelServingConfigServedEntitiesExternalModel struct { + Name string `json:"name"` + Provider string `json:"provider"` + Task string `json:"task"` + Ai21LabsConfig *ResourceModelServingConfigServedEntitiesExternalModelAi21LabsConfig `json:"ai21labs_config,omitempty"` + AmazonBedrockConfig *ResourceModelServingConfigServedEntitiesExternalModelAmazonBedrockConfig `json:"amazon_bedrock_config,omitempty"` + AnthropicConfig *ResourceModelServingConfigServedEntitiesExternalModelAnthropicConfig `json:"anthropic_config,omitempty"` + CohereConfig *ResourceModelServingConfigServedEntitiesExternalModelCohereConfig `json:"cohere_config,omitempty"` + DatabricksModelServingConfig 
*ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` + OpenaiConfig *ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig `json:"openai_config,omitempty"` + PalmConfig *ResourceModelServingConfigServedEntitiesExternalModelPalmConfig `json:"palm_config,omitempty"` +} + +type ResourceModelServingConfigServedEntities struct { + EntityName string `json:"entity_name,omitempty"` + EntityVersion string `json:"entity_version,omitempty"` + EnvironmentVars map[string]string `json:"environment_vars,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + MaxProvisionedThroughput int `json:"max_provisioned_throughput,omitempty"` + MinProvisionedThroughput int `json:"min_provisioned_throughput,omitempty"` + Name string `json:"name,omitempty"` + ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"` + WorkloadSize string `json:"workload_size,omitempty"` + WorkloadType string `json:"workload_type,omitempty"` + ExternalModel *ResourceModelServingConfigServedEntitiesExternalModel `json:"external_model,omitempty"` +} + type ResourceModelServingConfigServedModels struct { EnvironmentVars map[string]string `json:"environment_vars,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -31,6 +95,7 @@ type ResourceModelServingConfigTrafficConfig struct { type ResourceModelServingConfig struct { AutoCaptureConfig *ResourceModelServingConfigAutoCaptureConfig `json:"auto_capture_config,omitempty"` + ServedEntities []ResourceModelServingConfigServedEntities `json:"served_entities,omitempty"` ServedModels []ResourceModelServingConfigServedModels `json:"served_models,omitempty"` TrafficConfig *ResourceModelServingConfigTrafficConfig `json:"traffic_config,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_online_table.go b/bundle/internal/tf/schema/resource_online_table.go new file mode 100644 index 000000000..af8a348d3 --- /dev/null +++ b/bundle/internal/tf/schema/resource_online_table.go @@ -0,0 +1,26 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceOnlineTableSpecRunContinuously struct { +} + +type ResourceOnlineTableSpecRunTriggered struct { +} + +type ResourceOnlineTableSpec struct { + PerformFullCopy bool `json:"perform_full_copy,omitempty"` + PipelineId string `json:"pipeline_id,omitempty"` + PrimaryKeyColumns []string `json:"primary_key_columns,omitempty"` + SourceTableFullName string `json:"source_table_full_name,omitempty"` + TimeseriesKey string `json:"timeseries_key,omitempty"` + RunContinuously *ResourceOnlineTableSpecRunContinuously `json:"run_continuously,omitempty"` + RunTriggered *ResourceOnlineTableSpecRunTriggered `json:"run_triggered,omitempty"` +} + +type ResourceOnlineTable struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + Status []any `json:"status,omitempty"` + Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_registered_model.go b/bundle/internal/tf/schema/resource_registered_model.go index e4f1c088b..f19b68275 100644 --- a/bundle/internal/tf/schema/resource_registered_model.go +++ b/bundle/internal/tf/schema/resource_registered_model.go @@ -7,6 +7,7 @@ type ResourceRegisteredModel struct { Comment string `json:"comment,omitempty"` Id string `json:"id,omitempty"` Name string `json:"name"` + Owner string `json:"owner,omitempty"` SchemaName string `json:"schema_name"` StorageLocation string `json:"storage_location,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_restrict_workspace_admins_setting.go b/bundle/internal/tf/schema/resource_restrict_workspace_admins_setting.go new file mode 100644 index 000000000..975d501b9 --- /dev/null +++ b/bundle/internal/tf/schema/resource_restrict_workspace_admins_setting.go @@ -0,0 +1,14 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceRestrictWorkspaceAdminsSettingRestrictWorkspaceAdmins struct { + Status string `json:"status"` +} + +type ResourceRestrictWorkspaceAdminsSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + RestrictWorkspaceAdmins *ResourceRestrictWorkspaceAdminsSettingRestrictWorkspaceAdmins `json:"restrict_workspace_admins,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_vector_search_index.go b/bundle/internal/tf/schema/resource_vector_search_index.go new file mode 100644 index 000000000..06f666656 --- /dev/null +++ b/bundle/internal/tf/schema/resource_vector_search_index.go @@ -0,0 +1,49 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingSourceColumns struct { + EmbeddingModelEndpointName string `json:"embedding_model_endpoint_name,omitempty"` + Name string `json:"name,omitempty"` +} + +type ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingVectorColumns struct { + EmbeddingDimension int `json:"embedding_dimension,omitempty"` + Name string `json:"name,omitempty"` +} + +type ResourceVectorSearchIndexDeltaSyncIndexSpec struct { + PipelineId string `json:"pipeline_id,omitempty"` + PipelineType string `json:"pipeline_type,omitempty"` + SourceTable string `json:"source_table,omitempty"` + EmbeddingSourceColumns []ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingSourceColumns `json:"embedding_source_columns,omitempty"` + EmbeddingVectorColumns []ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingVectorColumns `json:"embedding_vector_columns,omitempty"` +} + +type ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingSourceColumns struct { + EmbeddingModelEndpointName string `json:"embedding_model_endpoint_name,omitempty"` + Name string `json:"name,omitempty"` +} + +type ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingVectorColumns struct { + EmbeddingDimension int `json:"embedding_dimension,omitempty"` + Name string `json:"name,omitempty"` +} + +type ResourceVectorSearchIndexDirectAccessIndexSpec struct { + SchemaJson string `json:"schema_json,omitempty"` + EmbeddingSourceColumns []ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingSourceColumns `json:"embedding_source_columns,omitempty"` + EmbeddingVectorColumns []ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingVectorColumns `json:"embedding_vector_columns,omitempty"` +} + +type ResourceVectorSearchIndex struct { + Creator string `json:"creator,omitempty"` + EndpointName string `json:"endpoint_name"` + Id string `json:"id,omitempty"` + IndexType string `json:"index_type"` + Name string `json:"name"` + PrimaryKey string `json:"primary_key"` + Status []any `json:"status,omitempty"` + DeltaSyncIndexSpec *ResourceVectorSearchIndexDeltaSyncIndexSpec `json:"delta_sync_index_spec,omitempty"` + DirectAccessIndexSpec *ResourceVectorSearchIndexDirectAccessIndexSpec `json:"direct_access_index_spec,omitempty"` +} diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 4cc81e7e7..b1b1841d6 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -3,177 +3,185 @@ package schema type Resources struct { - AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` - ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` - AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` - AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"` - AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"` - AzureBlobMount map[string]any `json:"databricks_azure_blob_mount,omitempty"` - Catalog map[string]any `json:"databricks_catalog,omitempty"` - CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"` - Cluster map[string]any `json:"databricks_cluster,omitempty"` - ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` - Connection map[string]any `json:"databricks_connection,omitempty"` - DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` - DefaultNamespaceSetting map[string]any 
`json:"databricks_default_namespace_setting,omitempty"` - Directory map[string]any `json:"databricks_directory,omitempty"` - Entitlements map[string]any `json:"databricks_entitlements,omitempty"` - ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` - File map[string]any `json:"databricks_file,omitempty"` - GitCredential map[string]any `json:"databricks_git_credential,omitempty"` - GlobalInitScript map[string]any `json:"databricks_global_init_script,omitempty"` - Grant map[string]any `json:"databricks_grant,omitempty"` - Grants map[string]any `json:"databricks_grants,omitempty"` - Group map[string]any `json:"databricks_group,omitempty"` - GroupInstanceProfile map[string]any `json:"databricks_group_instance_profile,omitempty"` - GroupMember map[string]any `json:"databricks_group_member,omitempty"` - GroupRole map[string]any `json:"databricks_group_role,omitempty"` - InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` - InstanceProfile map[string]any `json:"databricks_instance_profile,omitempty"` - IpAccessList map[string]any `json:"databricks_ip_access_list,omitempty"` - Job map[string]any `json:"databricks_job,omitempty"` - Library map[string]any `json:"databricks_library,omitempty"` - Metastore map[string]any `json:"databricks_metastore,omitempty"` - MetastoreAssignment map[string]any `json:"databricks_metastore_assignment,omitempty"` - MetastoreDataAccess map[string]any `json:"databricks_metastore_data_access,omitempty"` - MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` - MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` - MlflowWebhook map[string]any `json:"databricks_mlflow_webhook,omitempty"` - ModelServing map[string]any `json:"databricks_model_serving,omitempty"` - Mount map[string]any `json:"databricks_mount,omitempty"` - MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` - MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"` - MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"` - MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"` - MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"` - MwsPrivateAccessSettings map[string]any `json:"databricks_mws_private_access_settings,omitempty"` - MwsStorageConfigurations map[string]any `json:"databricks_mws_storage_configurations,omitempty"` - MwsVpcEndpoint map[string]any `json:"databricks_mws_vpc_endpoint,omitempty"` - MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` - Notebook map[string]any `json:"databricks_notebook,omitempty"` - OboToken map[string]any `json:"databricks_obo_token,omitempty"` - PermissionAssignment map[string]any `json:"databricks_permission_assignment,omitempty"` - Permissions map[string]any `json:"databricks_permissions,omitempty"` - Pipeline map[string]any `json:"databricks_pipeline,omitempty"` - Provider map[string]any `json:"databricks_provider,omitempty"` - Recipient map[string]any `json:"databricks_recipient,omitempty"` - RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` - Repo map[string]any `json:"databricks_repo,omitempty"` - Schema map[string]any `json:"databricks_schema,omitempty"` - Secret map[string]any `json:"databricks_secret,omitempty"` - SecretAcl map[string]any `json:"databricks_secret_acl,omitempty"` - SecretScope map[string]any `json:"databricks_secret_scope,omitempty"` - ServicePrincipal 
map[string]any `json:"databricks_service_principal,omitempty"` - ServicePrincipalRole map[string]any `json:"databricks_service_principal_role,omitempty"` - ServicePrincipalSecret map[string]any `json:"databricks_service_principal_secret,omitempty"` - Share map[string]any `json:"databricks_share,omitempty"` - SqlAlert map[string]any `json:"databricks_sql_alert,omitempty"` - SqlDashboard map[string]any `json:"databricks_sql_dashboard,omitempty"` - SqlEndpoint map[string]any `json:"databricks_sql_endpoint,omitempty"` - SqlGlobalConfig map[string]any `json:"databricks_sql_global_config,omitempty"` - SqlPermissions map[string]any `json:"databricks_sql_permissions,omitempty"` - SqlQuery map[string]any `json:"databricks_sql_query,omitempty"` - SqlTable map[string]any `json:"databricks_sql_table,omitempty"` - SqlVisualization map[string]any `json:"databricks_sql_visualization,omitempty"` - SqlWidget map[string]any `json:"databricks_sql_widget,omitempty"` - StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` - SystemSchema map[string]any `json:"databricks_system_schema,omitempty"` - Table map[string]any `json:"databricks_table,omitempty"` - Token map[string]any `json:"databricks_token,omitempty"` - User map[string]any `json:"databricks_user,omitempty"` - UserInstanceProfile map[string]any `json:"databricks_user_instance_profile,omitempty"` - UserRole map[string]any `json:"databricks_user_role,omitempty"` - VectorSearchEndpoint map[string]any `json:"databricks_vector_search_endpoint,omitempty"` - Volume map[string]any `json:"databricks_volume,omitempty"` - WorkspaceConf map[string]any `json:"databricks_workspace_conf,omitempty"` - WorkspaceFile map[string]any `json:"databricks_workspace_file,omitempty"` + AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` + ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` + AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` + AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"` + AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"` + AzureBlobMount map[string]any `json:"databricks_azure_blob_mount,omitempty"` + Catalog map[string]any `json:"databricks_catalog,omitempty"` + CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"` + Cluster map[string]any `json:"databricks_cluster,omitempty"` + ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` + Connection map[string]any `json:"databricks_connection,omitempty"` + DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` + DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"` + Directory map[string]any `json:"databricks_directory,omitempty"` + Entitlements map[string]any `json:"databricks_entitlements,omitempty"` + ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + File map[string]any `json:"databricks_file,omitempty"` + GitCredential map[string]any `json:"databricks_git_credential,omitempty"` + GlobalInitScript map[string]any `json:"databricks_global_init_script,omitempty"` + Grant map[string]any `json:"databricks_grant,omitempty"` + Grants map[string]any `json:"databricks_grants,omitempty"` + Group map[string]any `json:"databricks_group,omitempty"` + GroupInstanceProfile map[string]any `json:"databricks_group_instance_profile,omitempty"` + GroupMember map[string]any 
`json:"databricks_group_member,omitempty"` + GroupRole map[string]any `json:"databricks_group_role,omitempty"` + InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` + InstanceProfile map[string]any `json:"databricks_instance_profile,omitempty"` + IpAccessList map[string]any `json:"databricks_ip_access_list,omitempty"` + Job map[string]any `json:"databricks_job,omitempty"` + LakehouseMonitor map[string]any `json:"databricks_lakehouse_monitor,omitempty"` + Library map[string]any `json:"databricks_library,omitempty"` + Metastore map[string]any `json:"databricks_metastore,omitempty"` + MetastoreAssignment map[string]any `json:"databricks_metastore_assignment,omitempty"` + MetastoreDataAccess map[string]any `json:"databricks_metastore_data_access,omitempty"` + MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` + MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` + MlflowWebhook map[string]any `json:"databricks_mlflow_webhook,omitempty"` + ModelServing map[string]any `json:"databricks_model_serving,omitempty"` + Mount map[string]any `json:"databricks_mount,omitempty"` + MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"` + MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"` + MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"` + MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"` + MwsPrivateAccessSettings map[string]any `json:"databricks_mws_private_access_settings,omitempty"` + MwsStorageConfigurations map[string]any `json:"databricks_mws_storage_configurations,omitempty"` + MwsVpcEndpoint map[string]any `json:"databricks_mws_vpc_endpoint,omitempty"` + MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` + Notebook map[string]any `json:"databricks_notebook,omitempty"` + OboToken map[string]any `json:"databricks_obo_token,omitempty"` + OnlineTable map[string]any `json:"databricks_online_table,omitempty"` + PermissionAssignment map[string]any `json:"databricks_permission_assignment,omitempty"` + Permissions map[string]any `json:"databricks_permissions,omitempty"` + Pipeline map[string]any `json:"databricks_pipeline,omitempty"` + Provider map[string]any `json:"databricks_provider,omitempty"` + Recipient map[string]any `json:"databricks_recipient,omitempty"` + RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` + Repo map[string]any `json:"databricks_repo,omitempty"` + RestrictWorkspaceAdminsSetting map[string]any `json:"databricks_restrict_workspace_admins_setting,omitempty"` + Schema map[string]any `json:"databricks_schema,omitempty"` + Secret map[string]any `json:"databricks_secret,omitempty"` + SecretAcl map[string]any `json:"databricks_secret_acl,omitempty"` + SecretScope map[string]any `json:"databricks_secret_scope,omitempty"` + ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` + ServicePrincipalRole map[string]any `json:"databricks_service_principal_role,omitempty"` + ServicePrincipalSecret map[string]any `json:"databricks_service_principal_secret,omitempty"` + Share map[string]any `json:"databricks_share,omitempty"` + SqlAlert map[string]any `json:"databricks_sql_alert,omitempty"` + SqlDashboard map[string]any `json:"databricks_sql_dashboard,omitempty"` + SqlEndpoint map[string]any `json:"databricks_sql_endpoint,omitempty"` + SqlGlobalConfig 
map[string]any `json:"databricks_sql_global_config,omitempty"` + SqlPermissions map[string]any `json:"databricks_sql_permissions,omitempty"` + SqlQuery map[string]any `json:"databricks_sql_query,omitempty"` + SqlTable map[string]any `json:"databricks_sql_table,omitempty"` + SqlVisualization map[string]any `json:"databricks_sql_visualization,omitempty"` + SqlWidget map[string]any `json:"databricks_sql_widget,omitempty"` + StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` + SystemSchema map[string]any `json:"databricks_system_schema,omitempty"` + Table map[string]any `json:"databricks_table,omitempty"` + Token map[string]any `json:"databricks_token,omitempty"` + User map[string]any `json:"databricks_user,omitempty"` + UserInstanceProfile map[string]any `json:"databricks_user_instance_profile,omitempty"` + UserRole map[string]any `json:"databricks_user_role,omitempty"` + VectorSearchEndpoint map[string]any `json:"databricks_vector_search_endpoint,omitempty"` + VectorSearchIndex map[string]any `json:"databricks_vector_search_index,omitempty"` + Volume map[string]any `json:"databricks_volume,omitempty"` + WorkspaceConf map[string]any `json:"databricks_workspace_conf,omitempty"` + WorkspaceFile map[string]any `json:"databricks_workspace_file,omitempty"` } func NewResources() *Resources { return &Resources{ - AccessControlRuleSet: make(map[string]any), - ArtifactAllowlist: make(map[string]any), - AwsS3Mount: make(map[string]any), - AzureAdlsGen1Mount: make(map[string]any), - AzureAdlsGen2Mount: make(map[string]any), - AzureBlobMount: make(map[string]any), - Catalog: make(map[string]any), - CatalogWorkspaceBinding: make(map[string]any), - Cluster: make(map[string]any), - ClusterPolicy: make(map[string]any), - Connection: make(map[string]any), - DbfsFile: make(map[string]any), - DefaultNamespaceSetting: make(map[string]any), - Directory: make(map[string]any), - Entitlements: make(map[string]any), - ExternalLocation: make(map[string]any), - File: make(map[string]any), - GitCredential: make(map[string]any), - GlobalInitScript: make(map[string]any), - Grant: make(map[string]any), - Grants: make(map[string]any), - Group: make(map[string]any), - GroupInstanceProfile: make(map[string]any), - GroupMember: make(map[string]any), - GroupRole: make(map[string]any), - InstancePool: make(map[string]any), - InstanceProfile: make(map[string]any), - IpAccessList: make(map[string]any), - Job: make(map[string]any), - Library: make(map[string]any), - Metastore: make(map[string]any), - MetastoreAssignment: make(map[string]any), - MetastoreDataAccess: make(map[string]any), - MlflowExperiment: make(map[string]any), - MlflowModel: make(map[string]any), - MlflowWebhook: make(map[string]any), - ModelServing: make(map[string]any), - Mount: make(map[string]any), - MwsCredentials: make(map[string]any), - MwsCustomerManagedKeys: make(map[string]any), - MwsLogDelivery: make(map[string]any), - MwsNetworks: make(map[string]any), - MwsPermissionAssignment: make(map[string]any), - MwsPrivateAccessSettings: make(map[string]any), - MwsStorageConfigurations: make(map[string]any), - MwsVpcEndpoint: make(map[string]any), - MwsWorkspaces: make(map[string]any), - Notebook: make(map[string]any), - OboToken: make(map[string]any), - PermissionAssignment: make(map[string]any), - Permissions: make(map[string]any), - Pipeline: make(map[string]any), - Provider: make(map[string]any), - Recipient: make(map[string]any), - RegisteredModel: make(map[string]any), - Repo: make(map[string]any), - Schema: 
make(map[string]any), - Secret: make(map[string]any), - SecretAcl: make(map[string]any), - SecretScope: make(map[string]any), - ServicePrincipal: make(map[string]any), - ServicePrincipalRole: make(map[string]any), - ServicePrincipalSecret: make(map[string]any), - Share: make(map[string]any), - SqlAlert: make(map[string]any), - SqlDashboard: make(map[string]any), - SqlEndpoint: make(map[string]any), - SqlGlobalConfig: make(map[string]any), - SqlPermissions: make(map[string]any), - SqlQuery: make(map[string]any), - SqlTable: make(map[string]any), - SqlVisualization: make(map[string]any), - SqlWidget: make(map[string]any), - StorageCredential: make(map[string]any), - SystemSchema: make(map[string]any), - Table: make(map[string]any), - Token: make(map[string]any), - User: make(map[string]any), - UserInstanceProfile: make(map[string]any), - UserRole: make(map[string]any), - VectorSearchEndpoint: make(map[string]any), - Volume: make(map[string]any), - WorkspaceConf: make(map[string]any), - WorkspaceFile: make(map[string]any), + AccessControlRuleSet: make(map[string]any), + ArtifactAllowlist: make(map[string]any), + AwsS3Mount: make(map[string]any), + AzureAdlsGen1Mount: make(map[string]any), + AzureAdlsGen2Mount: make(map[string]any), + AzureBlobMount: make(map[string]any), + Catalog: make(map[string]any), + CatalogWorkspaceBinding: make(map[string]any), + Cluster: make(map[string]any), + ClusterPolicy: make(map[string]any), + Connection: make(map[string]any), + DbfsFile: make(map[string]any), + DefaultNamespaceSetting: make(map[string]any), + Directory: make(map[string]any), + Entitlements: make(map[string]any), + ExternalLocation: make(map[string]any), + File: make(map[string]any), + GitCredential: make(map[string]any), + GlobalInitScript: make(map[string]any), + Grant: make(map[string]any), + Grants: make(map[string]any), + Group: make(map[string]any), + GroupInstanceProfile: make(map[string]any), + GroupMember: make(map[string]any), + GroupRole: make(map[string]any), + InstancePool: make(map[string]any), + InstanceProfile: make(map[string]any), + IpAccessList: make(map[string]any), + Job: make(map[string]any), + LakehouseMonitor: make(map[string]any), + Library: make(map[string]any), + Metastore: make(map[string]any), + MetastoreAssignment: make(map[string]any), + MetastoreDataAccess: make(map[string]any), + MlflowExperiment: make(map[string]any), + MlflowModel: make(map[string]any), + MlflowWebhook: make(map[string]any), + ModelServing: make(map[string]any), + Mount: make(map[string]any), + MwsCredentials: make(map[string]any), + MwsCustomerManagedKeys: make(map[string]any), + MwsLogDelivery: make(map[string]any), + MwsNetworks: make(map[string]any), + MwsPermissionAssignment: make(map[string]any), + MwsPrivateAccessSettings: make(map[string]any), + MwsStorageConfigurations: make(map[string]any), + MwsVpcEndpoint: make(map[string]any), + MwsWorkspaces: make(map[string]any), + Notebook: make(map[string]any), + OboToken: make(map[string]any), + OnlineTable: make(map[string]any), + PermissionAssignment: make(map[string]any), + Permissions: make(map[string]any), + Pipeline: make(map[string]any), + Provider: make(map[string]any), + Recipient: make(map[string]any), + RegisteredModel: make(map[string]any), + Repo: make(map[string]any), + RestrictWorkspaceAdminsSetting: make(map[string]any), + Schema: make(map[string]any), + Secret: make(map[string]any), + SecretAcl: make(map[string]any), + SecretScope: make(map[string]any), + ServicePrincipal: make(map[string]any), + ServicePrincipalRole: 
make(map[string]any), + ServicePrincipalSecret: make(map[string]any), + Share: make(map[string]any), + SqlAlert: make(map[string]any), + SqlDashboard: make(map[string]any), + SqlEndpoint: make(map[string]any), + SqlGlobalConfig: make(map[string]any), + SqlPermissions: make(map[string]any), + SqlQuery: make(map[string]any), + SqlTable: make(map[string]any), + SqlVisualization: make(map[string]any), + SqlWidget: make(map[string]any), + StorageCredential: make(map[string]any), + SystemSchema: make(map[string]any), + Table: make(map[string]any), + Token: make(map[string]any), + User: make(map[string]any), + UserInstanceProfile: make(map[string]any), + UserRole: make(map[string]any), + VectorSearchEndpoint: make(map[string]any), + VectorSearchIndex: make(map[string]any), + Volume: make(map[string]any), + WorkspaceConf: make(map[string]any), + WorkspaceFile: make(map[string]any), } } diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 395326329..0bfab73fb 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.38.0" +const ProviderVersion = "1.39.0" func NewRoot() *Root { return &Root{ From 77ff994d1ba2cc3c3c1a7dd055d063391e89b07e Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 5 Apr 2024 17:52:39 +0200 Subject: [PATCH 120/286] Correctly transform libraries in for_each_task block (#1340) ## Changes Now DABs correctly transforms and deploys libraries in for_each_task block ``` tasks: - task_key: my_loop for_each_task: inputs: "[1,2,3]" task: task_key: my_loop_iteration libraries: - pypi: package: my_package ``` ## Tests Added regression test --- bundle/deploy/terraform/convert.go | 10 +++++ bundle/deploy/terraform/convert_test.go | 44 ++++++++++++++++++++ bundle/deploy/terraform/tfdyn/convert_job.go | 13 +++++- 3 files changed, 66 insertions(+), 1 deletion(-) diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index f2fb77e18..0ae6751d0 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -100,6 +100,16 @@ func BundleToTerraform(config *config.Root) *schema.Root { t.Library = append(t.Library, l) } + // Convert for_each_task libraries + if v.ForEachTask != nil { + for _, v_ := range v.ForEachTask.Task.Libraries { + var l schema.ResourceJobTaskForEachTaskTaskLibrary + conv(v_, &l) + t.ForEachTask.Task.Library = append(t.ForEachTask.Task.Library, l) + } + + } + dst.Task = append(dst.Task, t) } diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 9621a56af..986599a79 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -140,6 +140,50 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { bundleToTerraformEquivalenceTest(t, &config) } +func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { + var src = resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "my job", + Tasks: []jobs.Task{ + { + TaskKey: "key", + ForEachTask: &jobs.ForEachTask{ + Inputs: "[1,2,3]", + Task: jobs.Task{ + TaskKey: "iteration", + Libraries: []compute.Library{ + { + Pypi: &compute.PythonPyPiLibrary{ + Package: "mlflow", + }, + }, + }, + }, + }, + }, + }, + }, + } + + var config = config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my_job": &src, + }, + }, + } + + out := 
BundleToTerraform(&config) + resource := out.Resource.Job["my_job"].(*schema.ResourceJob) + + assert.Equal(t, "my job", resource.Name) + require.Len(t, resource.Task, 1) + require.Len(t, resource.Task[0].ForEachTask.Task.Library, 1) + assert.Equal(t, "mlflow", resource.Task[0].ForEachTask.Task.Library[0].Pypi.Package) + + bundleToTerraformEquivalenceTest(t, &config) +} + func TestBundleToTerraformPipeline(t *testing.T) { var src = resources.Pipeline{ PipelineSpec: &pipelines.PipelineSpec{ diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go index 778af1adc..65ac8b9bd 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job.go +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -45,7 +45,18 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { // Modify keys in the "task" blocks vout, err = dyn.Map(vout, "task", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { - return renameKeys(v, map[string]string{ + // Modify "library" blocks for for_each_task + vout, err = dyn.Map(v, "for_each_task.task", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return renameKeys(v, map[string]string{ + "libraries": "library", + }) + }) + + if err != nil { + return dyn.InvalidValue, err + } + + return renameKeys(vout, map[string]string{ "libraries": "library", }) })) From 60a4a347f9812bae127725b7f51b122083cc2a0a Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 8 Apr 2024 13:19:13 +0200 Subject: [PATCH 121/286] Fixed typo in error template for auth describe (#1341) ## Changes Fixed typo in error template for auth describe ## Tests Manually + added integration test --- cmd/auth/describe.go | 2 +- internal/auth_describe_test.go | 49 ++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 internal/auth_describe_test.go diff --git a/cmd/auth/describe.go b/cmd/auth/describe.go index 1f11df319..3a6e3d5d7 100644 --- a/cmd/auth/describe.go +++ b/cmd/auth/describe.go @@ -23,7 +23,7 @@ var authTemplate = `{{"Host:" | bold}} {{.Status.Details.Host}} ----- ` + configurationTemplate -var errorTemplate = `Unable to authenticate: {{.Error}} +var errorTemplate = `Unable to authenticate: {{.Status.Error}} ----- ` + configurationTemplate diff --git a/internal/auth_describe_test.go b/internal/auth_describe_test.go new file mode 100644 index 000000000..90b5d6801 --- /dev/null +++ b/internal/auth_describe_test.go @@ -0,0 +1,49 @@ +package internal + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go" + "github.com/stretchr/testify/require" +) + +func TestAuthDescribeSuccess(t *testing.T) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + stdout, _ := RequireSuccessfulRun(t, "auth", "describe") + outStr := stdout.String() + + w, err := databricks.NewWorkspaceClient(&databricks.Config{}) + require.NoError(t, err) + + require.NotEmpty(t, outStr) + require.Contains(t, outStr, fmt.Sprintf("Host: %s", w.Config.Host)) + + me, err := w.CurrentUser.Me(context.Background()) + require.NoError(t, err) + require.Contains(t, outStr, fmt.Sprintf("User: %s", me.UserName)) + require.Contains(t, outStr, fmt.Sprintf("Authenticated with: %s", w.Config.AuthType)) + require.Contains(t, outStr, "Current configuration:") + require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host)) + require.Contains(t, outStr, "✓ profile: default") +} + +func TestAuthDescribeFailure(t *testing.T) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + stdout, _ := 
RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") + outStr := stdout.String() + + require.NotEmpty(t, outStr) + require.Contains(t, outStr, "Unable to authenticate: resolve") + require.Contains(t, outStr, "has no nonexistent profile configured") + require.Contains(t, outStr, "Current configuration:") + + w, err := databricks.NewWorkspaceClient(&databricks.Config{}) + require.NoError(t, err) + + require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host)) + require.Contains(t, outStr, "✓ profile: nonexistent (from --profile flag)") +} From 87b3621bde4e1ca49b814d893d8f741655f3e68d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 16:52:29 +0530 Subject: [PATCH 122/286] Bump golang.org/x/term from 0.18.0 to 0.19.0 (#1343) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.18.0 to 0.19.0.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 931252baa..70acec54c 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/mod v0.16.0 golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 - golang.org/x/term v0.18.0 + golang.org/x/term v0.19.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 @@ -60,7 +60,7 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.169.0 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index 048e8e02e..4b1af480f 100644 --- a/go.sum +++ b/go.sum @@ -220,12 +220,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From 470e2fa9f7de37560ee4934ba92650fc7840b6c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 16:54:41 +0530 Subject: [PATCH 123/286] Bump github.com/hashicorp/hc-install from 0.6.3 to 0.6.4 (#1344) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/hc-install](https://github.com/hashicorp/hc-install) from 0.6.3 to 0.6.4.
Release notes

Sourced from github.com/hashicorp/hc-install's releases:

v0.6.4

DEPENDENCIES: dependency version bumps (see the commit list below).

Commits
  • 435c928 Update VERSION to cut 0.6.4
  • adbfc42 build(deps): bump github.com/go-git/go-git/v5 from 5.11.0 to 5.12.0 (#190)
  • 1009bb6 build(deps): bump github.com/ProtonMail/go-crypto (#189)
  • ac9ac80 build(deps): bump golang.org/x/mod from 0.15.0 to 0.16.0 (#188)
  • be9eff2 build(deps): bump github.com/ProtonMail/go-crypto (#187)
  • 71ed3f4 Result of tsccr-helper -log-level=info gha update -latest . (#185)
  • 178d8be Update VERSION back to dev
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 70acec54c..f012bf313 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 - github.com/hashicorp/hc-install v0.6.3 // MPL 2.0 + github.com/hashicorp/hc-install v0.6.4 // MPL 2.0 github.com/hashicorp/terraform-exec v0.20.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.21.0 // MPL 2.0 github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause @@ -34,7 +34,7 @@ require ( require ( cloud.google.com/go/compute v1.23.4 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect github.com/cloudflare/circl v1.3.7 // indirect diff --git a/go.sum b/go.sum index 4b1af480f..1d6946a3e 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= @@ -52,8 +52,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -102,8 +102,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= 
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= -github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= +github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= +github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= @@ -136,10 +136,10 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= From 9c54c249a15d6b3bfd46feb57e1a24d26b585c82 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 11:26:03 +0000 Subject: [PATCH 124/286] Bump golang.org/x/mod from 0.16.0 to 0.17.0 (#1345) Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.16.0 to 0.17.0.
Commits
  • aa51b25 modfile: do not collapse if there are unattached comments within blocks
  • 87140ec sumdb/tlog: make NewTiles only generate strictly necessary tiles
  • 18d3f56 modfile: fix crash on AddGoStmt in empty File
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f012bf313..63ee3ded7 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/mod v0.16.0 + golang.org/x/mod v0.17.0 golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 golang.org/x/term v0.19.0 diff --git a/go.sum b/go.sum index 1d6946a3e..c69f999b9 100644 --- a/go.sum +++ b/go.sum @@ -184,8 +184,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From acec87229809b66a7bb15436f369675ed7eea7ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 17:18:25 +0530 Subject: [PATCH 125/286] Bump golang.org/x/oauth2 from 0.18.0 to 0.19.0 (#1347) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.18.0 to 0.19.0.
Commits
  • d0e617c google: add Credentials.UniverseDomainProvider
  • 3c9c1f6 oauth2/google: fix the logic of sts 0 value of expires_in
  • 5a05c65 oauth2/google: fix remove content-type header from idms get requests
  • 3a6776a appengine: drop obsolete code for AppEngine envs <=Go 1.11
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 3 +-- go.sum | 25 ++----------------------- 2 files changed, 3 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index 63ee3ded7..25c1320a2 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.17.0 - golang.org/x/oauth2 v0.18.0 + golang.org/x/oauth2 v0.19.0 golang.org/x/sync v0.6.0 golang.org/x/term v0.19.0 golang.org/x/text v0.14.0 @@ -63,7 +63,6 @@ require ( golang.org/x/sys v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.169.0 // indirect - google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect google.golang.org/grpc v1.62.0 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/go.sum b/go.sum index c69f999b9..76e9e4be9 100644 --- a/go.sum +++ b/go.sum @@ -74,7 +74,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -157,7 +156,6 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= @@ -174,7 +172,6 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -183,7 +180,6 @@ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -191,19 +187,15 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -213,23 +205,15 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -239,18 +223,13 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= From 2f4c0c1b56e2521911675aa171bf331d7bab0d15 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 8 Apr 2024 15:28:38 +0200 Subject: [PATCH 126/286] Fixed pre-init script order (#1348) ## Changes `preinit` script needs to be executed before processing configuration files to allow the script to modify the configuration or 
add own configuration files. --- bundle/config/mutator/mutator.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index fda118271..9a2c828b2 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -9,14 +9,17 @@ import ( func DefaultMutators() []bundle.Mutator { return []bundle.Mutator{ + // Execute preinit script before loading any configuration files. + // It needs to be done before processing configuration files to allow + // the script to modify the configuration or add own configuration files. + scripts.Execute(config.ScriptPreInit), + loader.EntryPoint(), loader.ProcessRootIncludes(), // Verify that the CLI version is within the specified range. VerifyCliVersion(), - // Execute preinit script after loading all configuration files. - scripts.Execute(config.ScriptPreInit), EnvironmentsToTargets(), InitializeVariables(), DefineDefaultTarget(), From 50d3bb4d56591772de07697c1f641e302a694e79 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 8 Apr 2024 16:32:21 +0200 Subject: [PATCH 127/286] Execute preinit after entry point to make sure scripts are loaded (#1351) ## Changes Execute preinit after entry point to make sure scripts are loaded --- bundle/config/mutator/mutator.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index 9a2c828b2..ae0d7e5fb 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -9,12 +9,12 @@ import ( func DefaultMutators() []bundle.Mutator { return []bundle.Mutator{ - // Execute preinit script before loading any configuration files. + loader.EntryPoint(), + + // Execute preinit script before processing includes. // It needs to be done before processing configuration files to allow // the script to modify the configuration or add own configuration files. scripts.Execute(config.ScriptPreInit), - - loader.EntryPoint(), loader.ProcessRootIncludes(), // Verify that the CLI version is within the specified range. From d0642023cbd51fb10449b1db971c508c388fc783 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 20:41:59 +0530 Subject: [PATCH 128/286] Bump golang.org/x/sync from 0.6.0 to 0.7.0 (#1346) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.6.0 to 0.7.0.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 25c1320a2..6b9fc7a9d 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.17.0 golang.org/x/oauth2 v0.19.0 - golang.org/x/sync v0.6.0 + golang.org/x/sync v0.7.0 golang.org/x/term v0.19.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 diff --git a/go.sum b/go.sum index 76e9e4be9..c33ebe4c8 100644 --- a/go.sum +++ b/go.sum @@ -196,8 +196,8 @@ golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From d914a1b1e20843fd86415b9ca82772ab9aac1339 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 10 Apr 2024 11:55:02 +0200 Subject: [PATCH 129/286] Do not emit warning on YAML anchor blocks (#1354) ## Changes In 0.217.0 we started to emit warning on unknown fields in YAML configuration but wrongly considered YAML anchor blocks as unknown field. This PR fixes this by skipping normalising of YAML blocks. 
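For illustration, here is a minimal configuration of the kind that was affected, mirroring the `yaml_anchors_separate_block` fixture added as a regression test below. The top-level `tags` block is not itself a bundle configuration field; it only defines an anchor that is merged into task tags, and before this change it was flagged as an unknown field:

```
bundle:
  name: yaml_anchors_separate_block

# Anchor-only block: not a bundle field, it exists solely to be
# referenced via *custom_tags below and must not produce a warning.
tags: &custom_tags
  Tag1: "Value1"
  Tag2: "Value2"

resources:
  jobs:
    my_job:
      tasks:
        - task_key: yaml_anchors_separate_block
          tags:
            <<: *custom_tags
```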
## Tests Added regression tests --- bundle/tests/loader.go | 15 +++++++++++--- .../databricks.yml | 15 ++++++++++++++ bundle/tests/yaml_anchors_test.go | 12 +++++++++++ libs/dyn/convert/normalize.go | 15 ++++++++------ libs/dyn/convert/normalize_test.go | 20 +++++++++++++++++++ 5 files changed, 68 insertions(+), 9 deletions(-) create mode 100644 bundle/tests/yaml_anchors_separate_block/databricks.yml diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index e7cf18f73..8eddcf9a1 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/libs/diag" "github.com/stretchr/testify/require" ) @@ -20,9 +21,18 @@ func load(t *testing.T, path string) *bundle.Bundle { } func loadTarget(t *testing.T, path, env string) *bundle.Bundle { + b, diags := loadTargetWithDiags(path, env) + require.NoError(t, diags.Error()) + return b +} + +func loadTargetWithDiags(path, env string) (*bundle.Bundle, diag.Diagnostics) { ctx := context.Background() b, err := bundle.Load(ctx, path) - require.NoError(t, err) + if err != nil { + return nil, diag.FromErr(err) + } + diags := bundle.Apply(ctx, b, bundle.Seq( phases.LoadNamedTarget(env), mutator.RewriteSyncPaths(), @@ -30,6 +40,5 @@ func loadTarget(t *testing.T, path, env string) *bundle.Bundle { mutator.MergeJobTasks(), mutator.MergePipelineClusters(), )) - require.NoError(t, diags.Error()) - return b + return b, diags } diff --git a/bundle/tests/yaml_anchors_separate_block/databricks.yml b/bundle/tests/yaml_anchors_separate_block/databricks.yml new file mode 100644 index 000000000..447d5d0bb --- /dev/null +++ b/bundle/tests/yaml_anchors_separate_block/databricks.yml @@ -0,0 +1,15 @@ +bundle: + name: yaml_anchors_separate_block + +tags: &custom_tags + Tag1: "Value1" + Tag2: "Value2" + Tag3: "Value3" + +resources: + jobs: + my_job: + tasks: + - task_key: yaml_anchors_separate_block + tags: + <<: *custom_tags diff --git a/bundle/tests/yaml_anchors_test.go b/bundle/tests/yaml_anchors_test.go index 95cec30ad..5c8497051 100644 --- a/bundle/tests/yaml_anchors_test.go +++ b/bundle/tests/yaml_anchors_test.go @@ -19,6 +19,18 @@ func TestYAMLAnchors(t *testing.T) { require.NotNil(t, t0) require.NotNil(t, t1) + require.NotNil(t, t0.NewCluster) + require.NotNil(t, t1.NewCluster) assert.Equal(t, "10.4.x-scala2.12", t0.NewCluster.SparkVersion) assert.Equal(t, "10.4.x-scala2.12", t1.NewCluster.SparkVersion) } + +func TestYAMLAnchorsNoWarnings(t *testing.T) { + _, diags := loadTargetWithDiags("./yaml_anchors", "default") + assert.Empty(t, diags) +} + +func TestYAMLAnchorsSeparateBlockNoWarnings(t *testing.T) { + _, diags := loadTargetWithDiags("./yaml_anchors_separate_block", "default") + assert.Empty(t, diags) +} diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 296e2abb2..b4bee9773 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -89,14 +89,17 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen for _, pair := range src.MustMap().Pairs() { pk := pair.Key pv := pair.Value + index, ok := info.Fields[pk.MustString()] if !ok { - diags = diags.Append(diag.Diagnostic{ - Severity: diag.Warning, - Summary: fmt.Sprintf("unknown field: %s", pk.MustString()), - Location: pk.Location(), - Path: path, - }) + if !pv.IsAnchor() { + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: 
fmt.Sprintf("unknown field: %s", pk.MustString()), + Location: pk.Location(), + Path: path, + }) + } continue } diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 133eaef8f..1a0869a9f 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -659,3 +659,23 @@ func TestNormalizeFloatError(t *testing.T) { Path: dyn.EmptyPath, }, err[0]) } + +func TestNormalizeAnchors(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "anchor": dyn.V("anchor").MarkAnchor(), + }) + + vout, err := Normalize(typ, vin) + assert.Len(t, err, 0) + + // The field that can be mapped to the struct field is retained. + assert.Equal(t, map[string]any{ + "foo": "bar", + }, vout.AsAny()) +} From 9e1738deee216fdadeb373af2f528332fc4f66be Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 10 Apr 2024 13:17:21 +0200 Subject: [PATCH 130/286] Release v0.217.1 (#1355) CLI: * Don't attempt auth in `auth profiles --skip-validate` ([#1282](https://github.com/databricks/cli/pull/1282)). * Fixed typo in error template for auth describe ([#1341](https://github.com/databricks/cli/pull/1341)). Bundles: * Correctly transform libraries in for_each_task block ([#1340](https://github.com/databricks/cli/pull/1340)). * Do not emit warning on YAML anchor blocks ([#1354](https://github.com/databricks/cli/pull/1354)). * Fixed pre-init script order ([#1348](https://github.com/databricks/cli/pull/1348)). * Execute preinit after entry point to make sure scripts are loaded ([#1351](https://github.com/databricks/cli/pull/1351)). Dependency updates: * Bump internal terraform provider version to `1.39` ([#1339](https://github.com/databricks/cli/pull/1339)). * Bump golang.org/x/term from 0.18.0 to 0.19.0 ([#1343](https://github.com/databricks/cli/pull/1343)). * Bump github.com/hashicorp/hc-install from 0.6.3 to 0.6.4 ([#1344](https://github.com/databricks/cli/pull/1344)). * Bump golang.org/x/mod from 0.16.0 to 0.17.0 ([#1345](https://github.com/databricks/cli/pull/1345)). * Bump golang.org/x/oauth2 from 0.18.0 to 0.19.0 ([#1347](https://github.com/databricks/cli/pull/1347)). * Bump golang.org/x/sync from 0.6.0 to 0.7.0 ([#1346](https://github.com/databricks/cli/pull/1346)). --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2a507290..e29984771 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Version changelog +## 0.217.1 + +CLI: + * Don't attempt auth in `auth profiles --skip-validate` ([#1282](https://github.com/databricks/cli/pull/1282)). + * Fixed typo in error template for auth describe ([#1341](https://github.com/databricks/cli/pull/1341)). + +Bundles: + * Correctly transform libraries in for_each_task block ([#1340](https://github.com/databricks/cli/pull/1340)). + * Do not emit warning on YAML anchor blocks ([#1354](https://github.com/databricks/cli/pull/1354)). + * Fixed pre-init script order ([#1348](https://github.com/databricks/cli/pull/1348)). + * Execute preinit after entry point to make sure scripts are loaded ([#1351](https://github.com/databricks/cli/pull/1351)). + + +Dependency updates: + * Bump internal terraform provider version to `1.39` ([#1339](https://github.com/databricks/cli/pull/1339)). + * Bump golang.org/x/term from 0.18.0 to 0.19.0 ([#1343](https://github.com/databricks/cli/pull/1343)). 
+ * Bump github.com/hashicorp/hc-install from 0.6.3 to 0.6.4 ([#1344](https://github.com/databricks/cli/pull/1344)). + * Bump golang.org/x/mod from 0.16.0 to 0.17.0 ([#1345](https://github.com/databricks/cli/pull/1345)). + * Bump golang.org/x/oauth2 from 0.18.0 to 0.19.0 ([#1347](https://github.com/databricks/cli/pull/1347)). + * Bump golang.org/x/sync from 0.6.0 to 0.7.0 ([#1346](https://github.com/databricks/cli/pull/1346)). + ## 0.217.0 Breaking Change: From 4529b1ab985435913ce1f6f6b9a27d9e60cabf2d Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 10 Apr 2024 14:07:32 +0200 Subject: [PATCH 131/286] Release v0.217.1 (#1356) CLI: * Don't attempt auth in `auth profiles --skip-validate` ([#1282](https://github.com/databricks/cli/pull/1282)). * Fixed typo in error template for auth describe ([#1341](https://github.com/databricks/cli/pull/1341)). Bundles: * Correctly transform libraries in for_each_task block ([#1340](https://github.com/databricks/cli/pull/1340)). * Do not emit warning on YAML anchor blocks ([#1354](https://github.com/databricks/cli/pull/1354)). * Fixed pre-init script order ([#1348](https://github.com/databricks/cli/pull/1348)). * Execute preinit after entry point to make sure scripts are loaded ([#1351](https://github.com/databricks/cli/pull/1351)). Dependency updates: * Bump internal terraform provider version to `1.39` ([#1339](https://github.com/databricks/cli/pull/1339)). * Bump golang.org/x/term from 0.18.0 to 0.19.0 ([#1343](https://github.com/databricks/cli/pull/1343)). * Bump github.com/hashicorp/hc-install from 0.6.3 to 0.6.4 ([#1344](https://github.com/databricks/cli/pull/1344)). * Bump golang.org/x/mod from 0.16.0 to 0.17.0 ([#1345](https://github.com/databricks/cli/pull/1345)). * Bump golang.org/x/oauth2 from 0.18.0 to 0.19.0 ([#1347](https://github.com/databricks/cli/pull/1347)). * Bump golang.org/x/sync from 0.6.0 to 0.7.0 ([#1346](https://github.com/databricks/cli/pull/1346)). ## Changes ## Tests From e42156411bee91c36c832ba210e27d09bd3d1695 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Fri, 12 Apr 2024 11:53:29 +0200 Subject: [PATCH 132/286] Fix compute override for foreach tasks (#1357) ## Changes Fix compute override for foreach tasks. 
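For context, a `for_each_task` wraps the task that actually carries the compute settings, so the override has to be applied to the nested task rather than to the wrapper. Below is a minimal, hypothetical sketch of that shape using the Jobs SDK types referenced in the diff; the task keys and `Inputs` value are illustrative only.

```
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	// The cluster settings live on the nested task, not on the for_each wrapper.
	outer := jobs.Task{
		TaskKey: "loop",
		ForEachTask: &jobs.ForEachTask{
			Inputs: "[1, 2, 3]",
			Task: jobs.Task{
				TaskKey:           "loop_iteration",
				ExistingClusterId: "cluster-to-be-overridden",
			},
		},
	}

	// The override descends into the wrapper before rewriting cluster fields.
	task := &outer
	if task.ForEachTask != nil {
		task = &task.ForEachTask.Task
	}
	task.ExistingClusterId = "xxx" // the value passed on the command line

	fmt.Println(outer.ForEachTask.Task.ExistingClusterId) // prints "xxx"
}
```

The override is exercised by deploying with an explicit compute ID, as in the command below.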
``` $ databricks bundle deploy --compute-id=xxx ``` ## Tests I added unit tests --- bundle/config/mutator/override_compute.go | 7 +++++- .../config/mutator/override_compute_test.go | 25 +++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index 6b5c89be1..11a661123 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -22,7 +22,12 @@ func (m *overrideCompute) Name() string { func overrideJobCompute(j *resources.Job, compute string) { for i := range j.Tasks { - task := &j.Tasks[i] + var task = &j.Tasks[i] + + if task.ForEachTask != nil { + task = &task.ForEachTask.Task + } + if task.NewCluster != nil || task.ExistingClusterId != "" || task.ComputeKey != "" || task.JobClusterKey != "" { task.NewCluster = nil task.JobClusterKey = "" diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index e5087167d..7c0e1cefa 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -115,6 +115,31 @@ func TestOverridePipelineTask(t *testing.T) { assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } +func TestOverrideForEachTask(t *testing.T) { + t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": {JobSettings: &jobs.JobSettings{ + Name: "job1", + Tasks: []jobs.Task{ + { + ForEachTask: &jobs.ForEachTask{}, + }, + }, + }}, + }, + }, + }, + } + + m := mutator.OverrideCompute() + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task) +} + func TestOverrideProduction(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ From 5140a9a9022154018dc01c7dfb52956a817f6814 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 12 Apr 2024 20:52:30 +0530 Subject: [PATCH 133/286] Add docker images for the CLI (#1353) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR makes changes to support creating a docker image for the CLI with the `terraform` dependencies built in. This is useful for customers that operate in a network-restricted environment. Normally DABs makes API calls to registry.terraform.io to setup the terraform dependencies, with this setup the CLI/DABs will rely on the provider binaries bundled in the docker image. ### Specifically this PR makes the following changes: ---------------- Modifies the CLI release workflow to publish the docker images in the Github Container Registry. URL: https://github.com/databricks/cli/pkgs/container/cli. We use docker support in `goreleaser` to build and publish the images. Using goreleaser ensures the CLI packaged in the docker image is the same release artifact as the normal releases. For more information see: 1. https://goreleaser.com/cookbooks/multi-platform-docker-images 2. https://goreleaser.com/customization/docker/ Other choices made include: 1. Using `alpine` as the base image. The reason is `alpine` is a small and lightweight linux distribution (~5MB) and an industry standard. 2. Not using [docker manifest](https://docs.docker.com/reference/cli/docker/manifest) to create a multi-arch build. 
This is because the functionality is still experimental. ------------------ Make the `DATABRICKS_TF_VERSION` and `DATABRICKS_TF_PROVIDER_VERSION` environment variables optional for using the terraform file mirror. While it's not strictly necessary to make the docker image work, it's the "right" behaviour and reduces complexity. The rationale is: - These environment variables here are needed so the Databricks CLI does not accidentally use the file mirror bundled with VSCode if it's incompatible. This does not require the env vars to be mandatory. context: https://github.com/databricks/cli/pull/1294 - This makes the `Dockerfile` and `setup.sh` simpler. We don't need an [entrypoint.sh script to set the version environment variables](https://medium.com/@leonardo5621_66451/learn-how-to-use-entrypoint-scripts-in-docker-images-fede010f172d). This also makes using an interactive terminal with `docker run -it ...` work out of the box. ## Tests Tested manually. -------------------- To test the release pipeline I triggered a couple of dummy releases and verified that the images are built successfully and uploaded to Github. 1. https://github.com/databricks/cli/pkgs/container/cli 3. workflow for release: https://github.com/databricks/cli/actions/runs/8646106333 -------------------- I tested the docker container itself by setting up [Charles](https://www.charlesproxy.com/) as an HTTP proxy and verifying that no HTTP requests are made to `registry.terraform.io` Before: FYI, The Charles web proxy is hosted at localhost:8888. ``` shreyas.goenka@THW32HFW6T bundle-playground % rm -r .databricks shreyas.goenka@THW32HFW6T bundle-playground % HTTP_PROXY="http://localhost:8888" HTTPS_PROXY="http://localhost:8888" cli bundle deploy Uploading bundle files to /Users/shreyas.goenka@databricks.com/.bundle/bundle-playground/default/files... Deploying resources... Updating deployment state... Deployment complete! ``` Screenshot 2024-04-11 at 3 21 45 PM After: This time bundle deploy is run from inside the docker container. We use `host.docker.internal` to map to localhost on the host machine, and -v to mount the host file system as a volume. ``` shreyas.goenka@THW32HFW6T bundle-playground % docker run -v ~/projects/bundle-playground:/bundle -v ~/.databrickscfg:/root/.databrickscfg -it --entrypoint /bin/sh -e HTTP_PROXY="http://host.docker.internal:8888" -e HTTPS_PROXY="http://host.docker.internal:8888" --network host ghcr.io/databricks/cli:latest-arm64 / # cd /bundle/ /bundle # rm -r .databricks/ /bundle # databricks bundle deploy Uploading bundle files to /Users/shreyas.goenka@databricks.com/.bundle/bundle-playground/default/files... Deploying resources... Updating deployment state... Deployment complete! ``` Screenshot 2024-04-11 at 3 22 54 PM --- .github/workflows/release.yml | 13 +++++++ .goreleaser.yaml | 31 +++++++++++++++ Dockerfile | 24 ++++++++++++ bundle/deploy/terraform/init.go | 26 +++++++++---- bundle/deploy/terraform/init_test.go | 58 ++++++++++++++++++++++++++++ docker/config.tfrc | 6 +++ docker/setup.sh | 17 ++++++++ 7 files changed, 168 insertions(+), 7 deletions(-) create mode 100644 Dockerfile create mode 100644 docker/config.tfrc create mode 100755 docker/setup.sh diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 43ceea2cd..f9b4ec15f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -24,6 +24,19 @@ jobs: with: go-version: 1.21.x + # Log into the GitHub Container Registry. 
The goreleaser action will create + # the docker images and push them to the GitHub Container Registry. + - uses: "docker/login-action@v3" + with: + registry: "ghcr.io" + username: "${{ github.actor }}" + password: "${{ secrets.GITHUB_TOKEN }}" + + # QEMU is required to build cross platform docker images using buildx. + # It allows virtualization of the CPU architecture at the application level. + - name: Set up QEMU dependency + uses: docker/setup-qemu-action@v3 + - name: Run GoReleaser id: releaser uses: goreleaser/goreleaser-action@v4 diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 0cf87a9ce..e44068747 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -45,6 +45,37 @@ archives: # file name then additional logic to clean up older builds would be needed. name_template: 'databricks_cli_{{ if not .IsSnapshot }}{{ .Version }}_{{ end }}{{ .Os }}_{{ .Arch }}' +dockers: + - id: arm64 + goarch: arm64 + # We need to use buildx to build arm64 image on a amd64 machine. + use: buildx + image_templates: + # Docker tags can't have "+" in them, so we replace it with "-" + - 'ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-arm64' + - 'ghcr.io/databricks/cli:latest-arm64' + build_flag_templates: + - "--build-arg=ARCH=arm64" + - "--platform=linux/arm64" + extra_files: + - "./docker/config.tfrc" + - "./docker/setup.sh" + + - id: amd64 + goarch: amd64 + use: buildx + image_templates: + # Docker tags can't have "+" in them, so we replace it with "-" + - 'ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-amd64' + - 'ghcr.io/databricks/cli:latest-amd64' + build_flag_templates: + - "--build-arg=ARCH=amd64" + - "--platform=linux/amd64" + extra_files: + - "./docker/config.tfrc" + - "./docker/setup.sh" + + checksum: name_template: 'databricks_cli_{{ .Version }}_SHA256SUMS' algorithm: sha256 diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..d4e7614c8 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,24 @@ +FROM alpine:3.19 as builder + +RUN ["apk", "add", "jq"] + +WORKDIR /build + +COPY ./docker/setup.sh /build/docker/setup.sh +COPY ./databricks /app/databricks +COPY ./docker/config.tfrc /app/config/config.tfrc + +ARG ARCH +RUN /build/docker/setup.sh + +# Start from a fresh base image, to remove any build artifacts and scripts. +FROM alpine:3.19 + +ENV DATABRICKS_TF_EXEC_PATH "/app/bin/terraform" +ENV DATABRICKS_TF_CLI_CONFIG_FILE "/app/config/config.tfrc" +ENV PATH="/app:${PATH}" + +COPY --from=builder /app /app + +ENTRYPOINT ["/app/databricks"] +CMD ["-h"] diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 9f4235310..69ae70ba6 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -138,23 +138,35 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error { func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versionVarName string, currentVersion string) (string, error) { envValue := env.Get(ctx, envVarName) versionValue := env.Get(ctx, versionVarName) - if envValue == "" || versionValue == "" { - log.Debugf(ctx, "%s and %s aren't defined", envVarName, versionVarName) - return "", nil - } - if versionValue != currentVersion { - log.Debugf(ctx, "%s as %s does not match the current version %s, ignoring %s", versionVarName, versionValue, currentVersion, envVarName) + + // return early if the environment variable is not set + if envValue == "" { + log.Debugf(ctx, "%s is not defined", envVarName) return "", nil } + + // If the path does not exist, we return early. 
_, err := os.Stat(envValue) if err != nil { if os.IsNotExist(err) { - log.Debugf(ctx, "%s at %s does not exist, ignoring %s", envVarName, envValue, versionVarName) + log.Debugf(ctx, "%s at %s does not exist", envVarName, envValue) return "", nil } else { return "", err } } + + // If the version environment variable is not set, we directly return the value of the environment variable. + if versionValue == "" { + return envValue, nil + } + + // When the version environment variable is set, we check if it matches the current version. + // If it does not match, we return an empty string. + if versionValue != currentVersion { + log.Debugf(ctx, "%s as %s does not match the current version %s, ignoring %s", versionVarName, versionValue, currentVersion, envVarName) + return "", nil + } return envValue, nil } diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index ece897193..ffc215851 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/hashicorp/hc-install/product" "github.com/stretchr/testify/assert" @@ -392,3 +393,60 @@ func createTempFile(t *testing.T, dest string, name string, executable bool) str } return binPath } + +func TestGetEnvVarWithMatchingVersion(t *testing.T) { + envVarName := "FOO" + versionVarName := "FOO_VERSION" + + tmp := t.TempDir() + testutil.Touch(t, tmp, "bar") + + var tc = []struct { + envValue string + versionValue string + currentVersion string + expected string + }{ + { + envValue: filepath.Join(tmp, "bar"), + versionValue: "1.2.3", + currentVersion: "1.2.3", + expected: filepath.Join(tmp, "bar"), + }, + { + envValue: filepath.Join(tmp, "does-not-exist"), + versionValue: "1.2.3", + currentVersion: "1.2.3", + expected: "", + }, + { + envValue: filepath.Join(tmp, "bar"), + versionValue: "1.2.3", + currentVersion: "1.2.4", + expected: "", + }, + { + envValue: "", + versionValue: "1.2.3", + currentVersion: "1.2.3", + expected: "", + }, + { + envValue: filepath.Join(tmp, "bar"), + versionValue: "", + currentVersion: "1.2.3", + expected: filepath.Join(tmp, "bar"), + }, + } + + for _, c := range tc { + t.Run("", func(t *testing.T) { + t.Setenv(envVarName, c.envValue) + t.Setenv(versionVarName, c.versionValue) + + actual, err := getEnvVarWithMatchingVersion(context.Background(), envVarName, versionVarName, c.currentVersion) + require.NoError(t, err) + assert.Equal(t, c.expected, actual) + }) + } +} diff --git a/docker/config.tfrc b/docker/config.tfrc new file mode 100644 index 000000000..123f6d639 --- /dev/null +++ b/docker/config.tfrc @@ -0,0 +1,6 @@ +provider_installation { + filesystem_mirror { + path = "/app/providers" + include = ["registry.terraform.io/databricks/databricks"] + } +} diff --git a/docker/setup.sh b/docker/setup.sh new file mode 100755 index 000000000..3f6c09dc7 --- /dev/null +++ b/docker/setup.sh @@ -0,0 +1,17 @@ +#!/bin/sh +set -euo pipefail + +DATABRICKS_TF_VERSION=$(/app/databricks bundle debug terraform --output json | jq -r .terraform.version) +DATABRICKS_TF_PROVIDER_VERSION=$(/app/databricks bundle debug terraform --output json | jq -r .terraform.providerVersion) + +# Download the terraform binary +mkdir -p zip +wget 
https://releases.hashicorp.com/terraform/${DATABRICKS_TF_VERSION}/terraform_${DATABRICKS_TF_VERSION}_linux_${ARCH}.zip -O zip/terraform.zip +unzip zip/terraform.zip -d zip/terraform +mkdir -p /app/bin +mv zip/terraform/terraform /app/bin/terraform + +# Download the provider plugin +TF_PROVIDER_NAME=terraform-provider-databricks_${DATABRICKS_TF_PROVIDER_VERSION}_linux_${ARCH}.zip +mkdir -p /app/providers/registry.terraform.io/databricks/databricks +wget https://github.com/databricks/terraform-provider-databricks/releases/download/v${DATABRICKS_TF_PROVIDER_VERSION}/${TF_PROVIDER_NAME} -O /app/providers/registry.terraform.io/databricks/databricks/${TF_PROVIDER_NAME} From ed56bbca16c52f8cc2c1d7fe58782a10eff2440c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 12 Apr 2024 18:00:42 +0200 Subject: [PATCH 134/286] Transform artifact files source patterns in build not upload stage (#1359) ## Changes Transform artifact files source patterns in build not upload stage Resolves the following warning ``` artifact section is not defined for file at /Users/andrew.nester/dabs/wheel/target/myjar.jar. Skipping uploading. In order to use the define 'artifacts' section ``` ## Tests Unit test pass --- bundle/artifacts/build.go | 31 +++++++++++++++++++++++++++++++ bundle/artifacts/upload.go | 31 ------------------------------- bundle/artifacts/upload_test.go | 14 ++++++++++++-- 3 files changed, 43 insertions(+), 33 deletions(-) diff --git a/bundle/artifacts/build.go b/bundle/artifacts/build.go index 349b1ff89..722891ada 100644 --- a/bundle/artifacts/build.go +++ b/bundle/artifacts/build.go @@ -6,6 +6,7 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/diag" ) @@ -34,6 +35,36 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return diag.Errorf("artifact doesn't exist: %s", m.name) } + // Check if source paths are absolute, if not, make them absolute + for k := range artifact.Files { + f := &artifact.Files[k] + if !filepath.IsAbs(f.Source) { + dirPath := filepath.Dir(artifact.ConfigFilePath) + f.Source = filepath.Join(dirPath, f.Source) + } + } + + // Expand any glob reference in files source path + files := make([]config.ArtifactFile, 0, len(artifact.Files)) + for _, f := range artifact.Files { + matches, err := filepath.Glob(f.Source) + if err != nil { + return diag.Errorf("unable to find files for %s: %v", f.Source, err) + } + + if len(matches) == 0 { + return diag.Errorf("no files found for %s", f.Source) + } + + for _, match := range matches { + files = append(files, config.ArtifactFile{ + Source: match, + }) + } + } + + artifact.Files = files + // Skip building if build command is not specified or infered if artifact.BuildCommand == "" { // If no build command was specified or infered and there is no diff --git a/bundle/artifacts/upload.go b/bundle/artifacts/upload.go index e2c2fc1c9..5c12c9444 100644 --- a/bundle/artifacts/upload.go +++ b/bundle/artifacts/upload.go @@ -3,10 +3,8 @@ package artifacts import ( "context" "fmt" - "path/filepath" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -44,35 +42,6 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return diag.Errorf("artifact source is not configured: %s", m.name) } - // Check if source paths are absolute, if not, make them absolute - for k := range 
artifact.Files { - f := &artifact.Files[k] - if !filepath.IsAbs(f.Source) { - dirPath := filepath.Dir(artifact.ConfigFilePath) - f.Source = filepath.Join(dirPath, f.Source) - } - } - - // Expand any glob reference in files source path - files := make([]config.ArtifactFile, 0, len(artifact.Files)) - for _, f := range artifact.Files { - matches, err := filepath.Glob(f.Source) - if err != nil { - return diag.Errorf("unable to find files for %s: %v", f.Source, err) - } - - if len(matches) == 0 { - return diag.Errorf("no files found for %s", f.Source) - } - - for _, match := range matches { - files = append(files, config.ArtifactFile{ - Source: match, - }) - } - } - - artifact.Files = files return bundle.Apply(ctx, b, getUploadMutator(artifact.Type, m.name)) } diff --git a/bundle/artifacts/upload_test.go b/bundle/artifacts/upload_test.go index 687d73b4a..cf08843a7 100644 --- a/bundle/artifacts/upload_test.go +++ b/bundle/artifacts/upload_test.go @@ -58,7 +58,12 @@ func TestExpandGlobFilesSource(t *testing.T) { return &noop{} } - diags := bundle.Apply(context.Background(), b, u) + bm := &build{"test"} + buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { + return &noop{} + } + + diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u)) require.NoError(t, diags.Error()) require.Equal(t, 2, len(b.Config.Artifacts["test"].Files)) @@ -94,6 +99,11 @@ func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) { return &noop{} } - diags := bundle.Apply(context.Background(), b, u) + bm := &build{"test"} + buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { + return &noop{} + } + + diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u)) require.ErrorContains(t, diags.Error(), "no files found for") } From 1f1fe4c6a80908e500e2ad2f25320908a3cc6117 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:13:46 +0530 Subject: [PATCH 135/286] Add URLs for authentication documentation to the auth command help (#1365) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ``` ➜ cli git:(fix/better-auth-docs) ✗ cli auth -h Authentication related commands. For more information regarding how authentication for the Databricks CLI and SDKs work please refer to the documentation linked below. AWS: https://docs.databricks.com/en/dev-tools/auth/index.html Azure: https://learn.microsoft.com/en-us/azure/databricks/dev-tools/auth GCP: https://docs.gcp.databricks.com/en/dev-tools/auth/index.html ``` --- cmd/auth/auth.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index 59de76111..947d8940f 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -12,6 +12,13 @@ func New() *cobra.Command { cmd := &cobra.Command{ Use: "auth", Short: "Authentication related commands", + Long: `Authentication related commands. For more information regarding how +authentication for the Databricks CLI and SDKs work please refer to the documentation +linked below. 
+ +AWS: https://docs.databricks.com/en/dev-tools/auth/index.html +Azure: https://learn.microsoft.com/en-us/azure/databricks/dev-tools/auth +GCP: https://docs.gcp.databricks.com/en/dev-tools/auth/index.html`, } var perisistentAuth auth.PersistentAuth From b71f853649069af3729a8175ba5528be48b64fe3 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 15 Apr 2024 23:01:00 +0530 Subject: [PATCH 136/286] Do not prefill https:// in prompt for Databricks Host (#1364) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR is a minor UX improvement. By not autofilling the https:// prefix in Databricks Host we allow users to directly copy-paste from their browser. UX: ``` ➜ cli git:(fix/copy-host) cli auth login Databricks Profile Name: my-profile Databricks Host (e.g. https://.cloud.databricks.com): https://foobar.cloud.databricks.com Profile my-profile was successfully saved ``` ## Tests Manually. --- cmd/auth/auth.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index 947d8940f..4af2a7a71 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -35,9 +35,7 @@ GCP: https://docs.gcp.databricks.com/en/dev-tools/auth/index.html`, func promptForHost(ctx context.Context) (string, error) { prompt := cmdio.Prompt(ctx) - prompt.Label = "Databricks Host" - prompt.Default = "https://" - prompt.AllowEdit = true + prompt.Label = "Databricks Host (e.g. https://.cloud.databricks.com)" // Validate? host, err := prompt.Run() if err != nil { From 2a7746c8653e567335e8f1c5aad0dc3f0980ffd0 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 16 Apr 2024 15:40:47 +0530 Subject: [PATCH 137/286] Skip building docker images in release snapshot workflow (#1367) ## Changes We don't need docker images for the snapshot version of the CLI. ## Tests Trigged workflow for snapshot passes. https://github.com/databricks/cli/actions/runs/8703599475/job/23870125852?pr=1367 --- .github/workflows/release-snapshot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index d092a6693..bd89417e2 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -30,7 +30,7 @@ jobs: uses: goreleaser/goreleaser-action@v4 with: version: latest - args: release --snapshot + args: release --snapshot --skip docker - name: Upload macOS binaries uses: actions/upload-artifact@v3 From 569bb1cf6322af888b355ab24ecdbd9f64c5e927 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 16 Apr 2024 16:56:19 +0530 Subject: [PATCH 138/286] Add support for multi-arch Docker images (#1362) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR follows instructions in https://goreleaser.com/cookbooks/multi-platform-docker-images/ to create a multi-arch docker CLI image. Thus customers can simply specify `docker pull ghcr.io/databricks/cli:latest` to pull and run the image. The current approach uses the `docker manifest` support in goreleaser to create a multi-arch image. This has a couple of pros and cons. TLDR; The changes as is in the PR are good to go and very low risk. The information provided here is just FYI. pros: Fewer configurations/workflows for us to manage/maintain. 
Goreleaser makes sure the correct CLI binary is in place when building the CLI and also takes care of publishing it to the Github Container Registry. cons: Goreleaser only supports [docker manifest](https://docs.docker.com/reference/cli/docker/manifest/) to create multi-arch images. This has a few minor disadvantages: 1. `goreleaser` pushes all intermediate images (arm64 and and64 specific images) to the registry. This is required for the manifest to reference them. See: https://github.com/goreleaser/goreleaser/issues/2606 Note: We have a migration path here, if someday we stop publishing intermediate images, we can simply tag the "multi-arch" image as both `latest-amd64` and `latest-arm64`. For now, these are separate images. see: https://github.com/databricks/cli/pkgs/container/cli 2. `docker manifest` is technically an experimental command. Though it's been out for multiple years now and the indirect dependency by `goreleaser` should be fine. In any case, we can migrate by moving our docker build process off goreleaser if we need to. ## Tests Tested manually by publishing a new release for `v0.0.0-docker` in ghcr.io. 1. Package: https://github.com/databricks/cli/pkgs/container/cli 2. Release workflow: https://github.com/databricks/cli/actions/runs/8689359851 Tests the image itself by running it manually: ``` ➜ cli git:(feature/multi-arch-docker) docker pull ghcr.io/databricks/cli:latest latest: Pulling from databricks/cli bca4290a9639: Already exists 6d445556910d: Already exists Digest: sha256:82eabc500b541a89182aed4d3158c955e57c1e84d9616b76510aceb1d9024425 Status: Downloaded newer image for ghcr.io/databricks/cli:latest ghcr.io/databricks/cli:latest What's Next? View a summary of image vulnerabilities and recommendations → docker scout quickview ghcr.io/databricks/cli:latest ➜ cli git:(feature/multi-arch-docker) docker run ghcr.io/databricks/cli --version Databricks CLI v0.0.0-docker ``` --- .goreleaser.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index e44068747..d37876edb 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -75,6 +75,16 @@ dockers: - "./docker/config.tfrc" - "./docker/setup.sh" +docker_manifests: + - name_template: ghcr.io/databricks/cli:{{replace .Version "+" "-"}} + image_templates: + - ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-amd64 + - ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-arm64 + - name_template: ghcr.io/databricks/cli:latest + image_templates: + - ghcr.io/databricks/cli:latest-amd64 + - ghcr.io/databricks/cli:latest-arm64 + checksum: name_template: 'databricks_cli_{{ .Version }}_SHA256SUMS' From c949655f9f6044a826f4910521a4249bcea8f882 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Apr 2024 14:03:21 +0200 Subject: [PATCH 139/286] Bump github.com/databricks/databricks-sdk-go from 0.37.0 to 0.38.0 (#1361) [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.37.0&new-version=0.38.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .codegen/service.go.tmpl | 8 +- .gitattributes | 12 + bundle/config/mutator/override_compute.go | 4 +- .../config/mutator/override_compute_test.go | 4 +- bundle/schema/docs/bundle_descriptions.json | 174 +++-- .../workspace-assignment.go | 4 +- cmd/workspace/clusters/clusters.go | 2 + cmd/workspace/cmd.go | 24 + .../consumer-fulfillments.go | 162 +++++ .../consumer-installations.go | 369 +++++++++++ .../consumer-listings/consumer-listings.go | 263 ++++++++ .../consumer-personalization-requests.go | 237 +++++++ .../consumer-providers/consumer-providers.go | 173 +++++ cmd/workspace/groups.go | 4 + .../lakehouse-monitors/lakehouse-monitors.go | 52 +- cmd/workspace/lakeview/lakeview.go | 6 - .../provider-exchange-filters.go | 305 +++++++++ .../provider-exchanges/provider-exchanges.go | 619 ++++++++++++++++++ .../provider-files/provider-files.go | 315 +++++++++ .../provider-listings/provider-listings.go | 375 +++++++++++ .../provider-personalization-requests.go | 187 ++++++ .../provider-provider-analytics-dashboards.go | 252 +++++++ .../provider-providers/provider-providers.go | 374 +++++++++++ go.mod | 2 +- go.sum | 4 +- 26 files changed, 3828 insertions(+), 105 deletions(-) create mode 100755 cmd/workspace/consumer-fulfillments/consumer-fulfillments.go create mode 100755 cmd/workspace/consumer-installations/consumer-installations.go create mode 100755 cmd/workspace/consumer-listings/consumer-listings.go create mode 100755 cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go create mode 100755 cmd/workspace/consumer-providers/consumer-providers.go create mode 100755 cmd/workspace/provider-exchange-filters/provider-exchange-filters.go create mode 100755 cmd/workspace/provider-exchanges/provider-exchanges.go create mode 100755 cmd/workspace/provider-files/provider-files.go create mode 100755 cmd/workspace/provider-listings/provider-listings.go create mode 100755 cmd/workspace/provider-personalization-requests/provider-personalization-requests.go create mode 100755 cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go create mode 100755 cmd/workspace/provider-providers/provider-providers.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 1d88bfb61..0aa4b1028 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -e316cc3d78d087522a74650e26586088da9ac8cb \ No newline at end of file +94684175b8bd65f8701f89729351f8069e8309c9 \ No newline at end of file diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 4887a6230..6aabb02c9 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -145,7 +145,13 @@ func new{{.PascalName}}() *cobra.Command { {{- end}} {{end}} - {{- $excludeFromPrompts := list "workspace get-status" -}} + {{- $excludeFromPrompts := list + "workspace get-status" + "provider-exchanges get" + "provider-exchanges delete" + "provider-exchanges delete-listing-from-exchange" + "provider-exchanges list-exchanges-for-listing" + -}} {{- $fullCommandName := (print $serviceName " " .KebabName) -}} {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }} diff --git a/.gitattributes b/.gitattributes index c7d605130..f9aa02d18 100755 --- a/.gitattributes +++ b/.gitattributes @@ -38,6 +38,11 @@ cmd/workspace/cluster-policies/cluster-policies.go 
linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true cmd/workspace/connections/connections.go linguist-generated=true +cmd/workspace/consumer-fulfillments/consumer-fulfillments.go linguist-generated=true +cmd/workspace/consumer-installations/consumer-installations.go linguist-generated=true +cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true +cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true +cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true cmd/workspace/csp-enablement/csp-enablement.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true @@ -68,6 +73,13 @@ cmd/workspace/permission-migration/permission-migration.go linguist-generated=tr cmd/workspace/permissions/permissions.go linguist-generated=true cmd/workspace/pipelines/pipelines.go linguist-generated=true cmd/workspace/policy-families/policy-families.go linguist-generated=true +cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true +cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true +cmd/workspace/provider-files/provider-files.go linguist-generated=true +cmd/workspace/provider-listings/provider-listings.go linguist-generated=true +cmd/workspace/provider-personalization-requests/provider-personalization-requests.go linguist-generated=true +cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true +cmd/workspace/provider-providers/provider-providers.go linguist-generated=true cmd/workspace/providers/providers.go linguist-generated=true cmd/workspace/queries/queries.go linguist-generated=true cmd/workspace/query-history/query-history.go linguist-generated=true diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index 11a661123..73fbad364 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -28,10 +28,10 @@ func overrideJobCompute(j *resources.Job, compute string) { task = &task.ForEachTask.Task } - if task.NewCluster != nil || task.ExistingClusterId != "" || task.ComputeKey != "" || task.JobClusterKey != "" { + if task.NewCluster != nil || task.ExistingClusterId != "" || task.EnvironmentKey != "" || task.JobClusterKey != "" { task.NewCluster = nil task.JobClusterKey = "" - task.ComputeKey = "" + task.EnvironmentKey = "" task.ExistingClusterId = compute } } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 7c0e1cefa..152ee543e 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -36,7 +36,7 @@ func TestOverrideDevelopment(t *testing.T) { ExistingClusterId: "cluster2", }, { - ComputeKey: "compute_key", + EnvironmentKey: "environment_key", }, { JobClusterKey: "cluster_key", @@ -58,7 +58,7 @@ func TestOverrideDevelopment(t *testing.T) { assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[3].ExistingClusterId) assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) - assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[2].ComputeKey) + assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[2].EnvironmentKey) assert.Empty(t, 
b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey) } diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index f1c887ae4..ca889ae52 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -38,6 +38,9 @@ "compute_id": { "description": "" }, + "databricks_cli_version": { + "description": "" + }, "deployment": { "description": "", "properties": { @@ -170,30 +173,11 @@ "additionalproperties": { "description": "", "properties": { - "compute": { - "description": "A list of compute requirements that can be referenced by tasks of this job.", - "items": { - "description": "", - "properties": { - "compute_key": { - "description": "A unique name for the compute requirement. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine the compute requirements for the task execution." - }, - "spec": { - "description": "", - "properties": { - "kind": { - "description": "The kind of compute described by this compute specification." - } - } - } - } - } - }, "continuous": { "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." } } }, @@ -246,6 +230,31 @@ } } }, + "environments": { + "description": "A list of task execution environment specifications that can be referenced by tasks of this job.", + "items": { + "description": "", + "properties": { + "environment_key": { + "description": "The key of an environment. It has to be unique within a job." + }, + "spec": { + "description": "", + "properties": { + "client": { + "description": "*\nUser-friendly name for the client version: “client”: “1”\nThe version is a string, consisting of the major client version" + }, + "dependencies": { + "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", + "items": { + "description": "" + } + } + } + } + } + } + }, "format": { "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." }, @@ -322,7 +331,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." 
}, "new_cluster": { - "description": "If new_cluster, a description of a new cluster that is created for each run.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -401,6 +410,14 @@ } } }, + "clone_from": { + "description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.", + "properties": { + "source_cluster_id": { + "description": "The cluster that is being cloned." + } + } + }, "cluster_log_conf": { "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "properties": { @@ -725,7 +742,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." @@ -746,9 +763,6 @@ "items": { "description": "", "properties": { - "compute_key": { - "description": "The key of the compute requirement, specified in `job.settings.compute`, to use for execution of this task." - }, "condition_task": { "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.", "properties": { @@ -785,7 +799,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. 
It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -844,6 +858,9 @@ } } }, + "environment_key": { + "description": "The key that references an environment spec in a job. This field is required for Python script, Python wheel and dbt tasks when using serverless compute." + }, "existing_cluster_id": { "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. We suggest running jobs and tasks on new clusters for\ngreater reliability" }, @@ -938,7 +955,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a new cluster that is created for each run.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -1017,6 +1034,14 @@ } } }, + "clone_from": { + "description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.", + "properties": { + "source_cluster_id": { + "description": "The cluster that is being cloned." + } + } + }, "cluster_log_conf": { "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "properties": { @@ -1277,7 +1302,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." } } }, @@ -1429,7 +1454,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. 
This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." } } }, @@ -1507,7 +1532,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." } } }, @@ -1609,7 +1634,7 @@ } }, "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." }, "table": { "description": "", @@ -2649,6 +2674,9 @@ "compute_id": { "description": "" }, + "databricks_cli_version": { + "description": "" + }, "deployment": { "description": "", "properties": { @@ -2781,30 +2809,11 @@ "additionalproperties": { "description": "", "properties": { - "compute": { - "description": "A list of compute requirements that can be referenced by tasks of this job.", - "items": { - "description": "", - "properties": { - "compute_key": { - "description": "A unique name for the compute requirement. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine the compute requirements for the task execution." - }, - "spec": { - "description": "", - "properties": { - "kind": { - "description": "The kind of compute described by this compute specification." - } - } - } - } - } - }, "continuous": { "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." 
+ "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." } } }, @@ -2857,6 +2866,31 @@ } } }, + "environments": { + "description": "A list of task execution environment specifications that can be referenced by tasks of this job.", + "items": { + "description": "", + "properties": { + "environment_key": { + "description": "The key of an environment. It has to be unique within a job." + }, + "spec": { + "description": "", + "properties": { + "client": { + "description": "*\nUser-friendly name for the client version: “client”: “1”\nThe version is a string, consisting of the major client version" + }, + "dependencies": { + "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", + "items": { + "description": "" + } + } + } + } + } + } + }, "format": { "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." }, @@ -2933,7 +2967,7 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a new cluster that is created for each run.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -3012,6 +3046,14 @@ } } }, + "clone_from": { + "description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.", + "properties": { + "source_cluster_id": { + "description": "The cluster that is being cloned." + } + } + }, "cluster_log_conf": { "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "properties": { @@ -3336,7 +3378,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." 
@@ -3357,9 +3399,6 @@ "items": { "description": "", "properties": { - "compute_key": { - "description": "The key of the compute requirement, specified in `job.settings.compute`, to use for execution of this task." - }, "condition_task": { "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.", "properties": { @@ -3396,7 +3435,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3455,6 +3494,9 @@ } } }, + "environment_key": { + "description": "The key that references an environment spec in a job. This field is required for Python script, Python wheel and dbt tasks when using serverless compute." + }, "existing_cluster_id": { "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. We suggest running jobs and tasks on new clusters for\ngreater reliability" }, @@ -3549,7 +3591,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a new cluster that is created for each run.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { "description": "" @@ -3628,6 +3670,14 @@ } } }, + "clone_from": { + "description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.", + "properties": { + "source_cluster_id": { + "description": "The cluster that is being cloned." + } + } + }, "cluster_log_conf": { "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. 
The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "properties": { @@ -3888,7 +3938,7 @@ "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." } } }, @@ -4040,7 +4090,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." } } }, @@ -4118,7 +4168,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." + "description": "Optional location type of the notebook. 
When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." } } }, @@ -4220,7 +4270,7 @@ } }, "pause_status": { - "description": "Whether this trigger is paused or not." + "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." }, "table": { "description": "", diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index 935d64f05..b965d31ad 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -287,11 +287,11 @@ func newUpdate() *cobra.Command { return fmt.Errorf("invalid PRINCIPAL_ID: %s", args[1]) } - err = a.WorkspaceAssignment.Update(ctx, updateReq) + response, err := a.WorkspaceAssignment.Update(ctx, updateReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index 70afc609b..e657fd9c3 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -193,6 +193,7 @@ func newCreate() *cobra.Command { cmd.Flags().IntVar(&createReq.AutoterminationMinutes, "autotermination-minutes", createReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`) // TODO: complex arg: aws_attributes // TODO: complex arg: azure_attributes + // TODO: complex arg: clone_from // TODO: complex arg: cluster_log_conf cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) cmd.Flags().Var(&createReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [ @@ -447,6 +448,7 @@ func newEdit() *cobra.Command { cmd.Flags().IntVar(&editReq.AutoterminationMinutes, "autotermination-minutes", editReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`) // TODO: complex arg: aws_attributes // TODO: complex arg: azure_attributes + // TODO: complex arg: clone_from // TODO: complex arg: cluster_log_conf cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`) cmd.Flags().Var(&editReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. 
Supported values: [ diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 8b0022dcc..a78b9bc1e 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -11,6 +11,11 @@ import ( cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" + consumer_fulfillments "github.com/databricks/cli/cmd/workspace/consumer-fulfillments" + consumer_installations "github.com/databricks/cli/cmd/workspace/consumer-installations" + consumer_listings "github.com/databricks/cli/cmd/workspace/consumer-listings" + consumer_personalization_requests "github.com/databricks/cli/cmd/workspace/consumer-personalization-requests" + consumer_providers "github.com/databricks/cli/cmd/workspace/consumer-providers" credentials_manager "github.com/databricks/cli/cmd/workspace/credentials-manager" current_user "github.com/databricks/cli/cmd/workspace/current-user" dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" @@ -38,6 +43,13 @@ import ( permissions "github.com/databricks/cli/cmd/workspace/permissions" pipelines "github.com/databricks/cli/cmd/workspace/pipelines" policy_families "github.com/databricks/cli/cmd/workspace/policy-families" + provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters" + provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges" + provider_files "github.com/databricks/cli/cmd/workspace/provider-files" + provider_listings "github.com/databricks/cli/cmd/workspace/provider-listings" + provider_personalization_requests "github.com/databricks/cli/cmd/workspace/provider-personalization-requests" + provider_provider_analytics_dashboards "github.com/databricks/cli/cmd/workspace/provider-provider-analytics-dashboards" + provider_providers "github.com/databricks/cli/cmd/workspace/provider-providers" providers "github.com/databricks/cli/cmd/workspace/providers" queries "github.com/databricks/cli/cmd/workspace/queries" query_history "github.com/databricks/cli/cmd/workspace/query-history" @@ -80,6 +92,11 @@ func All() []*cobra.Command { out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) + out = append(out, consumer_fulfillments.New()) + out = append(out, consumer_installations.New()) + out = append(out, consumer_listings.New()) + out = append(out, consumer_personalization_requests.New()) + out = append(out, consumer_providers.New()) out = append(out, credentials_manager.New()) out = append(out, current_user.New()) out = append(out, dashboard_widgets.New()) @@ -107,6 +124,13 @@ func All() []*cobra.Command { out = append(out, permissions.New()) out = append(out, pipelines.New()) out = append(out, policy_families.New()) + out = append(out, provider_exchange_filters.New()) + out = append(out, provider_exchanges.New()) + out = append(out, provider_files.New()) + out = append(out, provider_listings.New()) + out = append(out, provider_personalization_requests.New()) + out = append(out, provider_provider_analytics_dashboards.New()) + out = append(out, provider_providers.New()) out = append(out, providers.New()) out = append(out, queries.New()) out = append(out, query_history.New()) diff --git a/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go b/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go new file mode 100755 index 000000000..cd92002a4 --- /dev/null +++ 
b/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package consumer_fulfillments + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-fulfillments", + Short: `Fulfillments are entities that allow consumers to preview installations.`, + Long: `Fulfillments are entities that allow consumers to preview installations.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetListingContentMetadataRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetListingContentMetadataRequest + + // TODO: short flags + + cmd.Flags().IntVar(&getReq.PageSize, "page-size", getReq.PageSize, ``) + cmd.Flags().StringVar(&getReq.PageToken, "page-token", getReq.PageToken, ``) + + cmd.Use = "get LISTING_ID" + cmd.Short = `Get listing content metadata.` + cmd.Long = `Get listing content metadata. + + Get a high level preview of the metadata of listing installable content.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.ListingId = args[0] + + response := w.ConsumerFulfillments.Get(ctx, getReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListFulfillmentsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListFulfillmentsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list LISTING_ID" + cmd.Short = `List all listing fulfillments.` + cmd.Long = `List all listing fulfillments. 
+ + Get all listings fulfillments associated with a listing. A _fulfillment_ is a + potential installation. Standard installations contain metadata about the + attached share or git repo. Only one of these fields will be present. + Personalized installations contain metadata about the attached share or git + repo, as well as the Delta Sharing recipient type.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.ListingId = args[0] + + response := w.ConsumerFulfillments.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service ConsumerFulfillments diff --git a/cmd/workspace/consumer-installations/consumer-installations.go b/cmd/workspace/consumer-installations/consumer-installations.go new file mode 100755 index 000000000..9d6c7c894 --- /dev/null +++ b/cmd/workspace/consumer-installations/consumer-installations.go @@ -0,0 +1,369 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package consumer_installations + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-installations", + Short: `Installations are entities that allow consumers to interact with Databricks Marketplace listings.`, + Long: `Installations are entities that allow consumers to interact with Databricks + Marketplace listings.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListListingInstallations()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createOverrides []func( + *cobra.Command, + *marketplace.CreateInstallationRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateInstallationRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: accepted_consumer_terms + cmd.Flags().StringVar(&createReq.CatalogName, "catalog-name", createReq.CatalogName, ``) + cmd.Flags().Var(&createReq.RecipientType, "recipient-type", `. Supported values: [DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS, DELTA_SHARING_RECIPIENT_TYPE_OPEN]`) + // TODO: complex arg: repo_detail + cmd.Flags().StringVar(&createReq.ShareName, "share-name", createReq.ShareName, ``) + + cmd.Use = "create LISTING_ID" + cmd.Short = `Install from a listing.` + cmd.Long = `Install from a listing. + + Install payload associated with a Databricks Marketplace listing.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + createReq.ListingId = args[0] + + response, err := w.ConsumerInstallations.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteInstallationRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteInstallationRequest + + // TODO: short flags + + cmd.Use = "delete LISTING_ID INSTALLATION_ID" + cmd.Short = `Uninstall from a listing.` + cmd.Long = `Uninstall from a listing. + + Uninstall an installation associated with a Databricks Marketplace listing.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.ListingId = args[0] + deleteReq.InstallationId = args[1] + + err = w.ConsumerInstallations.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListAllInstallationsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListAllInstallationsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List all installations.` + cmd.Long = `List all installations. + + List all installations across all listings.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ConsumerInstallations.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start list-listing-installations command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listListingInstallationsOverrides []func( + *cobra.Command, + *marketplace.ListInstallationsRequest, +) + +func newListListingInstallations() *cobra.Command { + cmd := &cobra.Command{} + + var listListingInstallationsReq marketplace.ListInstallationsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listListingInstallationsReq.PageSize, "page-size", listListingInstallationsReq.PageSize, ``) + cmd.Flags().StringVar(&listListingInstallationsReq.PageToken, "page-token", listListingInstallationsReq.PageToken, ``) + + cmd.Use = "list-listing-installations LISTING_ID" + cmd.Short = `List installations for a listing.` + cmd.Long = `List installations for a listing. + + List all installations for a particular listing.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listListingInstallationsReq.ListingId = args[0] + + response := w.ConsumerInstallations.ListListingInstallations(ctx, listListingInstallationsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range listListingInstallationsOverrides { + fn(cmd, &listListingInstallationsReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateInstallationRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateInstallationRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&updateReq.RotateToken, "rotate-token", updateReq.RotateToken, ``) + + cmd.Use = "update LISTING_ID INSTALLATION_ID" + cmd.Short = `Update an installation.` + cmd.Long = `Update an installation. + + This is a update API that will update the part of the fields defined in the + installation table as well as interact with external services according to the + fields not included in the installation table 1. the token will be rotate if + the rotateToken flag is true 2. the token will be forcibly rotate if the + rotateToken flag is true and the tokenInfo field is empty` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.ListingId = args[0] + updateReq.InstallationId = args[1] + + response, err := w.ConsumerInstallations.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ConsumerInstallations diff --git a/cmd/workspace/consumer-listings/consumer-listings.go b/cmd/workspace/consumer-listings/consumer-listings.go new file mode 100755 index 000000000..70295dfb3 --- /dev/null +++ b/cmd/workspace/consumer-listings/consumer-listings.go @@ -0,0 +1,263 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package consumer_listings + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-listings", + Short: `Listings are the core entities in the Marketplace.`, + Long: `Listings are the core entities in the Marketplace. 
They represent the products + that are available for consumption.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSearch()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetListingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetListingRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get listing.` + cmd.Long = `Get listing. + + Get a published listing in the Databricks Marketplace that the consumer has + access to.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Consumer Listings drop-down." + names, err := w.ConsumerListings.ListingSummaryNameToIdMap(ctx, marketplace.ListListingsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Consumer Listings drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.Id = args[0] + + response, err := w.ConsumerListings.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listOverrides []func( + *cobra.Command, + *marketplace.ListListingsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListListingsRequest + + // TODO: short flags + + // TODO: array: assets + // TODO: array: categories + cmd.Flags().BoolVar(&listReq.IsFree, "is-free", listReq.IsFree, `Filters each listing based on if it is free.`) + cmd.Flags().BoolVar(&listReq.IsPrivateExchange, "is-private-exchange", listReq.IsPrivateExchange, `Filters each listing based on if it is a private exchange.`) + cmd.Flags().BoolVar(&listReq.IsStaffPick, "is-staff-pick", listReq.IsStaffPick, `Filters each listing based on whether it is a staff pick.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + // TODO: array: provider_ids + // TODO: complex arg: sort_by_spec + // TODO: array: tags + + cmd.Use = "list" + cmd.Short = `List listings.` + cmd.Long = `List listings. + + List all published listings in the Databricks Marketplace that the consumer + has access to.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ConsumerListings.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start search command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var searchOverrides []func( + *cobra.Command, + *marketplace.SearchListingsRequest, +) + +func newSearch() *cobra.Command { + cmd := &cobra.Command{} + + var searchReq marketplace.SearchListingsRequest + + // TODO: short flags + + // TODO: array: assets + // TODO: array: categories + cmd.Flags().BoolVar(&searchReq.IsFree, "is-free", searchReq.IsFree, ``) + cmd.Flags().BoolVar(&searchReq.IsPrivateExchange, "is-private-exchange", searchReq.IsPrivateExchange, ``) + cmd.Flags().IntVar(&searchReq.PageSize, "page-size", searchReq.PageSize, ``) + cmd.Flags().StringVar(&searchReq.PageToken, "page-token", searchReq.PageToken, ``) + // TODO: array: provider_ids + cmd.Flags().Var(&searchReq.SortBy, "sort-by", `. Supported values: [SORT_BY_DATE, SORT_BY_RELEVANCE, SORT_BY_TITLE, SORT_BY_UNSPECIFIED]`) + + cmd.Use = "search QUERY" + cmd.Short = `Search listings.` + cmd.Long = `Search listings. + + Search published listings in the Databricks Marketplace that the consumer has + access to. This query supports a variety of different search parameters and + performs fuzzy matching. + + Arguments: + QUERY: Fuzzy matches query` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No QUERY argument specified. Loading names for Consumer Listings drop-down." + names, err := w.ConsumerListings.ListingSummaryNameToIdMap(ctx, marketplace.ListListingsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Consumer Listings drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Fuzzy matches query") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have fuzzy matches query") + } + searchReq.Query = args[0] + + response := w.ConsumerListings.Search(ctx, searchReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range searchOverrides { + fn(cmd, &searchReq) + } + + return cmd +} + +// end service ConsumerListings diff --git a/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go new file mode 100755 index 000000000..40ae4c848 --- /dev/null +++ b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go @@ -0,0 +1,237 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package consumer_personalization_requests + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-personalization-requests", + Short: `Personalization Requests allow customers to interact with the individualized Marketplace listing flow.`, + Long: `Personalization Requests allow customers to interact with the individualized + Marketplace listing flow.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createOverrides []func( + *cobra.Command, + *marketplace.CreatePersonalizationRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreatePersonalizationRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, ``) + cmd.Flags().StringVar(&createReq.Company, "company", createReq.Company, ``) + cmd.Flags().StringVar(&createReq.FirstName, "first-name", createReq.FirstName, ``) + cmd.Flags().BoolVar(&createReq.IsFromLighthouse, "is-from-lighthouse", createReq.IsFromLighthouse, ``) + cmd.Flags().StringVar(&createReq.LastName, "last-name", createReq.LastName, ``) + cmd.Flags().Var(&createReq.RecipientType, "recipient-type", `. Supported values: [DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS, DELTA_SHARING_RECIPIENT_TYPE_OPEN]`) + + cmd.Use = "create LISTING_ID" + cmd.Short = `Create a personalization request.` + cmd.Long = `Create a personalization request. + + Create a personalization request for a listing.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createReq.ListingId = args[0] + + response, err := w.ConsumerPersonalizationRequests.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetPersonalizationRequestRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetPersonalizationRequestRequest + + // TODO: short flags + + cmd.Use = "get LISTING_ID" + cmd.Short = `Get the personalization request for a listing.` + cmd.Long = `Get the personalization request for a listing. + + Get the personalization request for a listing. Each consumer can make at + *most* one personalization request for a listing.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.ListingId = args[0] + + response, err := w.ConsumerPersonalizationRequests.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListAllPersonalizationRequestsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListAllPersonalizationRequestsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List all personalization requests.` + cmd.Long = `List all personalization requests. + + List personalization requests for a consumer across all listings.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ConsumerPersonalizationRequests.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service ConsumerPersonalizationRequests diff --git a/cmd/workspace/consumer-providers/consumer-providers.go b/cmd/workspace/consumer-providers/consumer-providers.go new file mode 100755 index 000000000..5a0849dce --- /dev/null +++ b/cmd/workspace/consumer-providers/consumer-providers.go @@ -0,0 +1,173 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package consumer_providers + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-providers", + Short: `Providers are the entities that publish listings to the Marketplace.`, + Long: `Providers are the entities that publish listings to the Marketplace.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetProviderRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetProviderRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get a provider.` + cmd.Long = `Get a provider. + + Get a provider in the Databricks Marketplace with at least one visible + listing.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Consumer Providers drop-down." + names, err := w.ConsumerProviders.ProviderInfoNameToIdMap(ctx, marketplace.ListProvidersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Consumer Providers drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.Id = args[0] + + response, err := w.ConsumerProviders.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListProvidersRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListProvidersRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&listReq.IsFeatured, "is-featured", listReq.IsFeatured, ``) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List providers.` + cmd.Long = `List providers. + + List all providers in the Databricks Marketplace with at least one visible + listing.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ConsumerProviders.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service ConsumerProviders diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 8f3768137..d8a4dec4f 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -64,5 +64,9 @@ func Groups() []cobra.Group { ID: "dashboards", Title: "Dashboards", }, + { + ID: "marketplace", + Title: "Marketplace", + }, } } diff --git a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go index 7e1fe20be..465ed6f92 100755 --- a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go +++ b/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go @@ -68,7 +68,7 @@ func newCancelRefresh() *cobra.Command { // TODO: short flags - cmd.Use = "cancel-refresh FULL_NAME REFRESH_ID" + cmd.Use = "cancel-refresh TABLE_NAME REFRESH_ID" cmd.Short = `Cancel refresh.` cmd.Long = `Cancel refresh. @@ -84,7 +84,7 @@ func newCancelRefresh() *cobra.Command { created. Arguments: - FULL_NAME: Full name of the table. + TABLE_NAME: Full name of the table. REFRESH_ID: ID of the refresh.` // This command is being previewed; hide from help output. @@ -102,7 +102,7 @@ func newCancelRefresh() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - cancelRefreshReq.FullName = args[0] + cancelRefreshReq.TableName = args[0] cancelRefreshReq.RefreshId = args[1] err = w.LakehouseMonitors.CancelRefresh(ctx, cancelRefreshReq) @@ -154,7 +154,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: time_series cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `Optional argument to specify the warehouse for dashboard creation.`) - cmd.Use = "create FULL_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME" + cmd.Use = "create TABLE_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME" cmd.Short = `Create a table monitor.` cmd.Long = `Create a table monitor. @@ -171,7 +171,7 @@ func newCreate() *cobra.Command { where this call was made. Arguments: - FULL_NAME: Full name of the table. + TABLE_NAME: Full name of the table. ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables). OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` @@ -181,7 +181,7 @@ func newCreate() *cobra.Command { if cmd.Flags().Changed("json") { err := root.ExactArgs(1)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. Provide 'assets_dir', 'output_schema_name' in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only TABLE_NAME as positional arguments. 
Provide 'assets_dir', 'output_schema_name' in your JSON input") } return nil } @@ -200,7 +200,7 @@ func newCreate() *cobra.Command { return err } } - createReq.FullName = args[0] + createReq.TableName = args[0] if !cmd.Flags().Changed("json") { createReq.AssetsDir = args[1] } @@ -243,7 +243,7 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete FULL_NAME" + cmd.Use = "delete TABLE_NAME" cmd.Short = `Delete a table monitor.` cmd.Long = `Delete a table monitor. @@ -262,7 +262,7 @@ func newDelete() *cobra.Command { call; those assets must be manually cleaned up (if desired). Arguments: - FULL_NAME: Full name of the table.` + TABLE_NAME: Full name of the table.` cmd.Annotations = make(map[string]string) @@ -276,7 +276,7 @@ func newDelete() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - deleteReq.FullName = args[0] + deleteReq.TableName = args[0] err = w.LakehouseMonitors.Delete(ctx, deleteReq) if err != nil { @@ -313,7 +313,7 @@ func newGet() *cobra.Command { // TODO: short flags - cmd.Use = "get FULL_NAME" + cmd.Use = "get TABLE_NAME" cmd.Short = `Get a table monitor.` cmd.Long = `Get a table monitor. @@ -331,7 +331,7 @@ func newGet() *cobra.Command { was created. Arguments: - FULL_NAME: Full name of the table.` + TABLE_NAME: Full name of the table.` cmd.Annotations = make(map[string]string) @@ -345,7 +345,7 @@ func newGet() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getReq.FullName = args[0] + getReq.TableName = args[0] response, err := w.LakehouseMonitors.Get(ctx, getReq) if err != nil { @@ -382,7 +382,7 @@ func newGetRefresh() *cobra.Command { // TODO: short flags - cmd.Use = "get-refresh FULL_NAME REFRESH_ID" + cmd.Use = "get-refresh TABLE_NAME REFRESH_ID" cmd.Short = `Get refresh.` cmd.Long = `Get refresh. @@ -398,7 +398,7 @@ func newGetRefresh() *cobra.Command { created. Arguments: - FULL_NAME: Full name of the table. + TABLE_NAME: Full name of the table. REFRESH_ID: ID of the refresh.` cmd.Annotations = make(map[string]string) @@ -413,7 +413,7 @@ func newGetRefresh() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getRefreshReq.FullName = args[0] + getRefreshReq.TableName = args[0] getRefreshReq.RefreshId = args[1] response, err := w.LakehouseMonitors.GetRefresh(ctx, getRefreshReq) @@ -451,7 +451,7 @@ func newListRefreshes() *cobra.Command { // TODO: short flags - cmd.Use = "list-refreshes FULL_NAME" + cmd.Use = "list-refreshes TABLE_NAME" cmd.Short = `List refreshes.` cmd.Long = `List refreshes. @@ -468,7 +468,7 @@ func newListRefreshes() *cobra.Command { created. Arguments: - FULL_NAME: Full name of the table.` + TABLE_NAME: Full name of the table.` cmd.Annotations = make(map[string]string) @@ -482,7 +482,7 @@ func newListRefreshes() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - listRefreshesReq.FullName = args[0] + listRefreshesReq.TableName = args[0] response, err := w.LakehouseMonitors.ListRefreshes(ctx, listRefreshesReq) if err != nil { @@ -519,7 +519,7 @@ func newRunRefresh() *cobra.Command { // TODO: short flags - cmd.Use = "run-refresh FULL_NAME" + cmd.Use = "run-refresh TABLE_NAME" cmd.Short = `Queue a metric refresh for a monitor.` cmd.Long = `Queue a metric refresh for a monitor. @@ -536,7 +536,7 @@ func newRunRefresh() *cobra.Command { created. 
Arguments: - FULL_NAME: Full name of the table.` + TABLE_NAME: Full name of the table.` cmd.Annotations = make(map[string]string) @@ -550,7 +550,7 @@ func newRunRefresh() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - runRefreshReq.FullName = args[0] + runRefreshReq.TableName = args[0] response, err := w.LakehouseMonitors.RunRefresh(ctx, runRefreshReq) if err != nil { @@ -599,7 +599,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: snapshot // TODO: complex arg: time_series - cmd.Use = "update FULL_NAME OUTPUT_SCHEMA_NAME" + cmd.Use = "update TABLE_NAME OUTPUT_SCHEMA_NAME" cmd.Short = `Update a table monitor.` cmd.Long = `Update a table monitor. @@ -618,7 +618,7 @@ func newUpdate() *cobra.Command { updated. Arguments: - FULL_NAME: Full name of the table. + TABLE_NAME: Full name of the table. OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` cmd.Annotations = make(map[string]string) @@ -627,7 +627,7 @@ func newUpdate() *cobra.Command { if cmd.Flags().Changed("json") { err := root.ExactArgs(1)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, provide only FULL_NAME as positional arguments. Provide 'output_schema_name' in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only TABLE_NAME as positional arguments. Provide 'output_schema_name' in your JSON input") } return nil } @@ -646,7 +646,7 @@ func newUpdate() *cobra.Command { return err } } - updateReq.FullName = args[0] + updateReq.TableName = args[0] if !cmd.Flags().Changed("json") { updateReq.OutputSchemaName = args[1] } diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index b0136de20..566853ff9 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -272,9 +272,6 @@ func newMigrate() *cobra.Command { Arguments: SOURCE_DASHBOARD_ID: UUID of the dashboard to be migrated.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -475,9 +472,6 @@ func newUnpublish() *cobra.Command { Arguments: DASHBOARD_ID: UUID identifying the dashboard to be published.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go new file mode 100755 index 000000000..43ae6da7e --- /dev/null +++ b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go @@ -0,0 +1,305 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_exchange_filters + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-exchange-filters", + Short: `Marketplace exchanges filters curate which groups can access an exchange.`, + Long: `Marketplace exchanges filters curate which groups can access an exchange.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateExchangeFilterRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateExchangeFilterRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create a new exchange filter.` + cmd.Long = `Create a new exchange filter. + + Add an exchange filter.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderExchangeFilters.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteExchangeFilterRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteExchangeFilterRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete an exchange filter.` + cmd.Long = `Delete an exchange filter. + + Delete an exchange filter` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Exchange Filters drop-down." 
+ names, err := w.ProviderExchangeFilters.ExchangeFilterNameToIdMap(ctx, marketplace.ListExchangeFiltersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Exchange Filters drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.Id = args[0] + + err = w.ProviderExchangeFilters.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListExchangeFiltersRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListExchangeFiltersRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list EXCHANGE_ID" + cmd.Short = `List exchange filters.` + cmd.Long = `List exchange filters. + + List exchange filter` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.ExchangeId = args[0] + + response := w.ProviderExchangeFilters.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateExchangeFilterRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateExchangeFilterRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update exchange filter.` + cmd.Long = `Update exchange filter. + + Update an exchange filter.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.ProviderExchangeFilters.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderExchangeFilters diff --git a/cmd/workspace/provider-exchanges/provider-exchanges.go b/cmd/workspace/provider-exchanges/provider-exchanges.go new file mode 100755 index 000000000..fe1a9a3dc --- /dev/null +++ b/cmd/workspace/provider-exchanges/provider-exchanges.go @@ -0,0 +1,619 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_exchanges + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-exchanges", + Short: `Marketplace exchanges allow providers to share their listings with a curated set of customers.`, + Long: `Marketplace exchanges allow providers to share their listings with a curated + set of customers.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newAddListingToExchange()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteListingFromExchange()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListExchangesForListing()) + cmd.AddCommand(newListListingsForExchange()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start add-listing-to-exchange command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var addListingToExchangeOverrides []func( + *cobra.Command, + *marketplace.AddExchangeForListingRequest, +) + +func newAddListingToExchange() *cobra.Command { + cmd := &cobra.Command{} + + var addListingToExchangeReq marketplace.AddExchangeForListingRequest + var addListingToExchangeJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&addListingToExchangeJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "add-listing-to-exchange LISTING_ID EXCHANGE_ID" + cmd.Short = `Add an exchange for listing.` + cmd.Long = `Add an exchange for listing. + + Associate an exchange with a listing` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'listing_id', 'exchange_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = addListingToExchangeJson.Unmarshal(&addListingToExchangeReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + addListingToExchangeReq.ListingId = args[0] + } + if !cmd.Flags().Changed("json") { + addListingToExchangeReq.ExchangeId = args[1] + } + + response, err := w.ProviderExchanges.AddListingToExchange(ctx, addListingToExchangeReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range addListingToExchangeOverrides { + fn(cmd, &addListingToExchangeReq) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateExchangeRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateExchangeRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create an exchange.` + cmd.Long = `Create an exchange. + + Create an exchange` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderExchanges.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteExchangeRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteExchangeRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete an exchange.` + cmd.Long = `Delete an exchange. + + This removes a listing from marketplace.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Id = args[0] + + err = w.ProviderExchanges.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start delete-listing-from-exchange command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteListingFromExchangeOverrides []func( + *cobra.Command, + *marketplace.RemoveExchangeForListingRequest, +) + +func newDeleteListingFromExchange() *cobra.Command { + cmd := &cobra.Command{} + + var deleteListingFromExchangeReq marketplace.RemoveExchangeForListingRequest + + // TODO: short flags + + cmd.Use = "delete-listing-from-exchange ID" + cmd.Short = `Remove an exchange for listing.` + cmd.Long = `Remove an exchange for listing. + + Disassociate an exchange with a listing` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteListingFromExchangeReq.Id = args[0] + + err = w.ProviderExchanges.DeleteListingFromExchange(ctx, deleteListingFromExchangeReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteListingFromExchangeOverrides { + fn(cmd, &deleteListingFromExchangeReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *marketplace.GetExchangeRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetExchangeRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get an exchange.` + cmd.Long = `Get an exchange. + + Get an exchange.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Id = args[0] + + response, err := w.ProviderExchanges.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListExchangesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListExchangesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List exchanges.` + cmd.Long = `List exchanges. + + List exchanges visible to provider` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ProviderExchanges.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start list-exchanges-for-listing command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listExchangesForListingOverrides []func( + *cobra.Command, + *marketplace.ListExchangesForListingRequest, +) + +func newListExchangesForListing() *cobra.Command { + cmd := &cobra.Command{} + + var listExchangesForListingReq marketplace.ListExchangesForListingRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listExchangesForListingReq.PageSize, "page-size", listExchangesForListingReq.PageSize, ``) + cmd.Flags().StringVar(&listExchangesForListingReq.PageToken, "page-token", listExchangesForListingReq.PageToken, ``) + + cmd.Use = "list-exchanges-for-listing LISTING_ID" + cmd.Short = `List exchanges for listing.` + cmd.Long = `List exchanges for listing. + + List exchanges associated with a listing` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listExchangesForListingReq.ListingId = args[0] + + response := w.ProviderExchanges.ListExchangesForListing(ctx, listExchangesForListingReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listExchangesForListingOverrides { + fn(cmd, &listExchangesForListingReq) + } + + return cmd +} + +// start list-listings-for-exchange command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listListingsForExchangeOverrides []func( + *cobra.Command, + *marketplace.ListListingsForExchangeRequest, +) + +func newListListingsForExchange() *cobra.Command { + cmd := &cobra.Command{} + + var listListingsForExchangeReq marketplace.ListListingsForExchangeRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listListingsForExchangeReq.PageSize, "page-size", listListingsForExchangeReq.PageSize, ``) + cmd.Flags().StringVar(&listListingsForExchangeReq.PageToken, "page-token", listListingsForExchangeReq.PageToken, ``) + + cmd.Use = "list-listings-for-exchange EXCHANGE_ID" + cmd.Short = `List listings for exchange.` + cmd.Long = `List listings for exchange. + + List listings associated with an exchange` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No EXCHANGE_ID argument specified. Loading names for Provider Exchanges drop-down." + names, err := w.ProviderExchanges.ExchangeListingExchangeNameToExchangeIdMap(ctx, marketplace.ListExchangesForListingRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Exchanges drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + listListingsForExchangeReq.ExchangeId = args[0] + + response := w.ProviderExchanges.ListListingsForExchange(ctx, listListingsForExchangeReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listListingsForExchangeOverrides { + fn(cmd, &listListingsForExchangeReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateExchangeRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateExchangeRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update exchange.` + cmd.Long = `Update exchange. + + Update an exchange` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.ProviderExchanges.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderExchanges diff --git a/cmd/workspace/provider-files/provider-files.go b/cmd/workspace/provider-files/provider-files.go new file mode 100755 index 000000000..b9357f131 --- /dev/null +++ b/cmd/workspace/provider-files/provider-files.go @@ -0,0 +1,315 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_files + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-files", + Short: `Marketplace offers a set of file APIs for various purposes such as preview notebooks and provider icons.`, + Long: `Marketplace offers a set of file APIs for various purposes such as preview + notebooks and provider icons.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateFileRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateFileRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.DisplayName, "display-name", createReq.DisplayName, ``) + + cmd.Use = "create" + cmd.Short = `Create a file.` + cmd.Long = `Create a file. + + Create a file. Currently, only provider icons and attached notebooks are + supported.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderFiles.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteFileRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteFileRequest + + // TODO: short flags + + cmd.Use = "delete FILE_ID" + cmd.Short = `Delete a file.` + cmd.Long = `Delete a file. + + Delete a file` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FILE_ID argument specified. Loading names for Provider Files drop-down." 
+ names, err := w.ProviderFiles.FileInfoDisplayNameToIdMap(ctx, marketplace.ListFilesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Files drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.FileId = args[0] + + err = w.ProviderFiles.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetFileRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetFileRequest + + // TODO: short flags + + cmd.Use = "get FILE_ID" + cmd.Short = `Get a file.` + cmd.Long = `Get a file. + + Get a file` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FILE_ID argument specified. Loading names for Provider Files drop-down." + names, err := w.ProviderFiles.FileInfoDisplayNameToIdMap(ctx, marketplace.ListFilesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Files drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.FileId = args[0] + + response, err := w.ProviderFiles.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListFilesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListFilesRequest + var listJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List files.` + cmd.Long = `List files. 
+ + List files attached to a parent entity.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = listJson.Unmarshal(&listReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response := w.ProviderFiles.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service ProviderFiles diff --git a/cmd/workspace/provider-listings/provider-listings.go b/cmd/workspace/provider-listings/provider-listings.go new file mode 100755 index 000000000..4f90f7b9e --- /dev/null +++ b/cmd/workspace/provider-listings/provider-listings.go @@ -0,0 +1,375 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_listings + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-listings", + Short: `Listings are the core entities in the Marketplace.`, + Long: `Listings are the core entities in the Marketplace. They represent the products + that are available for consumption.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateListingRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateListingRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create a listing.` + cmd.Long = `Create a listing. + + Create a new listing` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderListings.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteListingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteListingRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete a listing.` + cmd.Long = `Delete a listing. + + Delete a listing` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Listings drop-down." + names, err := w.ProviderListings.ListingSummaryNameToIdMap(ctx, marketplace.GetListingsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Listings drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.Id = args[0] + + err = w.ProviderListings.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetListingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetListingRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get a listing.` + cmd.Long = `Get a listing. + + Get a listing` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Listings drop-down." + names, err := w.ProviderListings.ListingSummaryNameToIdMap(ctx, marketplace.GetListingsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Listings drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.Id = args[0] + + response, err := w.ProviderListings.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.GetListingsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.GetListingsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List listings.` + cmd.Long = `List listings. + + List listings owned by this provider` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ProviderListings.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateListingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateListingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update listing.` + cmd.Long = `Update listing. + + Update a listing` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.ProviderListings.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderListings diff --git a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go new file mode 100755 index 000000000..58b3cba1d --- /dev/null +++ b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go @@ -0,0 +1,187 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_personalization_requests + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-personalization-requests", + Short: `Personalization requests are an alternate to instantly available listings.`, + Long: `Personalization requests are an alternate to instantly available listings. + Control the lifecycle of personalized solutions.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListAllPersonalizationRequestsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListAllPersonalizationRequestsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `All personalization requests across all listings.` + cmd.Long = `All personalization requests across all listings. + + List personalization requests to this provider. 
This will return all + personalization requests, regardless of which listing they are for.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ProviderPersonalizationRequests.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdatePersonalizationRequestRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdatePersonalizationRequestRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Reason, "reason", updateReq.Reason, ``) + // TODO: complex arg: share + + cmd.Use = "update LISTING_ID REQUEST_ID STATUS" + cmd.Short = `Update personalization request status.` + cmd.Long = `Update personalization request status. + + Update personalization request. This method only permits updating the status + of the request.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(2)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only LISTING_ID, REQUEST_ID as positional arguments. Provide 'status' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.ListingId = args[0] + updateReq.RequestId = args[1] + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &updateReq.Status) + if err != nil { + return fmt.Errorf("invalid STATUS: %s", args[2]) + } + } + + response, err := w.ProviderPersonalizationRequests.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderPersonalizationRequests diff --git a/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go new file mode 100755 index 000000000..70ef0f320 --- /dev/null +++ b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go @@ -0,0 +1,252 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_provider_analytics_dashboards + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-provider-analytics-dashboards", + Short: `Manage templated analytics solution for providers.`, + Long: `Manage templated analytics solution for providers.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetLatestVersion()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "create" + cmd.Short = `Create provider analytics dashboard.` + cmd.Long = `Create provider analytics dashboard. + + Create provider analytics dashboard. Returns Marketplace specific id. Not to + be confused with the Lakeview dashboard id.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.ProviderProviderAnalyticsDashboards.Create(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get" + cmd.Short = `Get provider analytics dashboard.` + cmd.Long = `Get provider analytics dashboard. 
+ + Get provider analytics dashboard.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.ProviderProviderAnalyticsDashboards.Get(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd) + } + + return cmd +} + +// start get-latest-version command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getLatestVersionOverrides []func( + *cobra.Command, +) + +func newGetLatestVersion() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get-latest-version" + cmd.Short = `Get latest version of provider analytics dashboard.` + cmd.Long = `Get latest version of provider analytics dashboard. + + Get latest version of provider analytics dashboard.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.ProviderProviderAnalyticsDashboards.GetLatestVersion(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getLatestVersionOverrides { + fn(cmd) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateProviderAnalyticsDashboardRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateProviderAnalyticsDashboardRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Int64Var(&updateReq.Version, "version", updateReq.Version, `this is the version of the dashboard template we want to update our user to current expectation is that it should be equal to latest version of the dashboard template.`) + + cmd.Use = "update ID" + cmd.Short = `Update provider analytics dashboard.` + cmd.Long = `Update provider analytics dashboard. + + Update provider analytics dashboard. + + Arguments: + ID: id is immutable property and can't be updated.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.Id = args[0] + + response, err := w.ProviderProviderAnalyticsDashboards.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderProviderAnalyticsDashboards diff --git a/cmd/workspace/provider-providers/provider-providers.go b/cmd/workspace/provider-providers/provider-providers.go new file mode 100755 index 000000000..52f4c45ae --- /dev/null +++ b/cmd/workspace/provider-providers/provider-providers.go @@ -0,0 +1,374 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_providers + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-providers", + Short: `Providers are entities that manage assets in Marketplace.`, + Long: `Providers are entities that manage assets in Marketplace.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateProviderRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateProviderRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create a provider.` + cmd.Long = `Create a provider. + + Create a provider` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderProviders.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteProviderRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteProviderRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete provider.` + cmd.Long = `Delete provider. + + Delete provider` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Providers drop-down." + names, err := w.ProviderProviders.ProviderInfoNameToIdMap(ctx, marketplace.ListProvidersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Providers drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.Id = args[0] + + err = w.ProviderProviders.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetProviderRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetProviderRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get provider.` + cmd.Long = `Get provider. + + Get provider profile` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Providers drop-down." + names, err := w.ProviderProviders.ProviderInfoNameToIdMap(ctx, marketplace.ListProvidersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Providers drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.Id = args[0] + + response, err := w.ProviderProviders.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListProvidersRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListProvidersRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List providers.` + cmd.Long = `List providers. + + List provider profiles for account.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ProviderProviders.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateProviderRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateProviderRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update provider.` + cmd.Long = `Update provider. 
+ + Update provider profile` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.ProviderProviders.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderProviders diff --git a/go.mod b/go.mod index 6b9fc7a9d..4ba3076b0 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.37.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.38.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index c33ebe4c8..07137405f 100644 --- a/go.sum +++ b/go.sum @@ -30,8 +30,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.37.0 h1:8ej3hNqfyfDNdV5YBjfLbq+p99JLu5NTtzwObbsIhRM= -github.com/databricks/databricks-sdk-go v0.37.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= +github.com/databricks/databricks-sdk-go v0.38.0 h1:MQhOCWTkdKItG+n6ZwcXQv9FWBVXq9fax8VSZns2e+0= +github.com/databricks/databricks-sdk-go v0.38.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 77d6820075989b5de42bf278d02f58ba647a012d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 17 Apr 2024 10:58:07 +0200 Subject: [PATCH 140/286] Convert between integer and float in normalization (#1371) ## Changes We currently issue a warning if an integer is used where a floating point number is expected. But if they are convertible, we should convert and not issue a warning. This change fixes normalization if they are convertible between each other. We still produce a warning if the type conversion leads to a loss in precision. ## Tests Unit tests pass. 
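For illustration, here is a minimal, self-contained sketch of the behavior covered by these tests (hypothetical usage, not part of the patch itself; it relies only on the `convert.Normalize`, `dyn.V`, and `MustFloat` calls that appear in the diff below):

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
)

func main() {
	// An integer used where a float is expected: 2 converts without loss,
	// so no warning diagnostic is produced.
	var f float64
	vout, diags := convert.Normalize(&f, dyn.V(int64(2)))
	fmt.Println(vout.MustFloat(), len(diags)) // prints: 2 0

	// A float used where an integer is expected: 1.5 cannot be converted
	// without losing precision, so a warning diagnostic is returned.
	var i int
	_, diags = convert.Normalize(&i, dyn.V(1.5))
	fmt.Println(diags[0].Summary)
}
```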
--- libs/dyn/convert/normalize.go | 20 +++++++++++++ libs/dyn/convert/normalize_test.go | 46 ++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index b4bee9773..35d4d8210 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -293,6 +293,16 @@ func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value, path dyn switch src.Kind() { case dyn.KindInt: out = src.MustInt() + case dyn.KindFloat: + out = int64(src.MustFloat()) + if src.MustFloat() != float64(out) { + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf(`cannot accurately represent "%g" as integer due to precision loss`, src.MustFloat()), + Location: src.Location(), + Path: path, + }) + } case dyn.KindString: var err error out, err = strconv.ParseInt(src.MustString(), 10, 64) @@ -326,6 +336,16 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path d switch src.Kind() { case dyn.KindFloat: out = src.MustFloat() + case dyn.KindInt: + out = float64(src.MustInt()) + if src.MustInt() != int64(out) { + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf(`cannot accurately represent "%d" as floating point number due to precision loss`, src.MustInt()), + Location: src.Location(), + Path: path, + }) + } case dyn.KindString: var err error out, err = strconv.ParseFloat(src.MustString(), 64) diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 1a0869a9f..843b4ea59 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -555,6 +555,27 @@ func TestNormalizeIntNil(t *testing.T) { }, err[0]) } +func TestNormalizeIntFromFloat(t *testing.T) { + var typ int + vin := dyn.V(float64(1.0)) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, dyn.V(int64(1)), vout) +} + +func TestNormalizeIntFromFloatError(t *testing.T) { + var typ int + vin := dyn.V(1.5) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `cannot accurately represent "1.5" as integer due to precision loss`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + func TestNormalizeIntFromString(t *testing.T) { var typ int vin := dyn.V("123") @@ -618,6 +639,31 @@ func TestNormalizeFloatNil(t *testing.T) { }, err[0]) } +func TestNormalizeFloatFromInt(t *testing.T) { + var typ float64 + + // Maximum safe integer that can be accurately represented as a float. + vin := dyn.V(int64(9007199254740992)) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, dyn.V(float64(9007199254740992)), vout) +} + +func TestNormalizeFloatFromIntError(t *testing.T) { + var typ float64 + + // Minimum integer that cannot be accurately represented as a float. 
+ vin := dyn.V(9007199254740992 + 1) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `cannot accurately represent "9007199254740993" as floating point number due to precision loss`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + func TestNormalizeFloatFromString(t *testing.T) { var typ float64 vin := dyn.V("1.2") From c3a7d17d1d97db17a585cbb06abfa3ff6a58d4e0 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 17 Apr 2024 18:59:39 -0700 Subject: [PATCH 141/286] Disable locking for development mode (#1302) ## Changes This changes `databricks bundle deploy` so that it skips the lock acquisition/release step for a `mode: development` target: * This saves about 2 seconds (measured over 100 runs on a quiet/busy workspace). * This helps avoid the `deploy lock acquired by lennart@company.com at 2024-02-28 15:48:38.40603 +0100 CET. Use --force-lock to override` error * Risk: this may cause deployment conflicts, but since dev mode deployments are always scoped to a user, that risk should be minimal Update after discussion: * This behavior can now be disabled via a setting. * Docs PR: https://github.com/databricks/docs/pull/15873 ## Measurements ### 100 deployments of the "python_default" project to an empty workspace _Before this branch:_ p50 time: 11.479 seconds p90 time: 11.757 seconds _After this branch:_ p50 time: 9.386 seconds p90 time: 9.599 seconds ### 100 deployments of the "python_default" project to a busy (staging) workspace _Before this branch:_ * p50 time: 13.335 seconds * p90 time: 15.295 seconds _After this branch:_ * p50 time: 11.397 seconds * p90 time: 11.743 seconds ### Typical duration of deployment steps * Acquiring Deployment Lock: 1.096 seconds * Deployment Preparations and Operations: 1.477 seconds * Uploading Artifacts: 1.26 seconds * Finalizing Deployment: 9.699 seconds * Releasing Deployment Lock: 1.198 seconds --------- Co-authored-by: Pieter Noordhuis Co-authored-by: Andrew Nester --- bundle/config/deployment.go | 2 +- bundle/config/lock.go | 13 ++++++++++- bundle/config/mutator/process_target_mode.go | 22 ++++++++++++++++--- .../mutator/process_target_mode_test.go | 20 +++++++++++++++++ bundle/config/root.go | 12 ++++++++++ 5 files changed, 64 insertions(+), 5 deletions(-) diff --git a/bundle/config/deployment.go b/bundle/config/deployment.go index f89c7b3ee..7f0f57a8c 100644 --- a/bundle/config/deployment.go +++ b/bundle/config/deployment.go @@ -6,5 +6,5 @@ type Deployment struct { FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"` // Lock configures locking behavior on deployment. - Lock Lock `json:"lock" bundle:"readonly"` + Lock Lock `json:"lock"` } diff --git a/bundle/config/lock.go b/bundle/config/lock.go index 760099a95..10e9e1c9c 100644 --- a/bundle/config/lock.go +++ b/bundle/config/lock.go @@ -1,7 +1,7 @@ package config type Lock struct { - // Enabled toggles deployment lock. True by default. + // Enabled toggles deployment lock. True by default except in development mode. // Use a pointer value so that only explicitly configured values are set // and we don't merge configuration with zero-initialized values. Enabled *bool `json:"enabled,omitempty"` @@ -11,9 +11,20 @@ type Lock struct { Force bool `json:"force,omitempty"` } +// IsEnabled checks if the deployment lock is enabled. 
func (lock Lock) IsEnabled() bool { if lock.Enabled != nil { return *lock.Enabled } return true } + +// IsExplicitlyEnabled checks if the deployment lock is explicitly enabled. +// Only returns true if locking is explicitly set using a command-line +// flag or configuration file. +func (lock Lock) IsExplicitlyEnabled() bool { + if lock.Enabled != nil { + return *lock.Enabled + } + return false +} diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index d3de5728c..8e70fab73 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" @@ -29,9 +30,16 @@ func (m *processTargetMode) Name() string { // Mark all resources as being for 'development' purposes, i.e. // changing their their name, adding tags, and (in the future) // marking them as 'hidden' in the UI. -func transformDevelopmentMode(b *bundle.Bundle) diag.Diagnostics { - r := b.Config.Resources +func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() { + log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true") + err := disableDeploymentLock(b) + if err != nil { + return diag.FromErr(err) + } + } + r := b.Config.Resources shortName := b.Config.Workspace.CurrentUser.ShortName prefix := "[dev " + shortName + "] " @@ -100,6 +108,14 @@ func transformDevelopmentMode(b *bundle.Bundle) diag.Diagnostics { return nil } +func disableDeploymentLock(b *bundle.Bundle) error { + return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "bundle.deployment.lock", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "enabled", dyn.V(false)) + }) + }) +} + func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics { if path := findNonUserPath(b); path != "" { return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path) @@ -163,7 +179,7 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Di if diags != nil { return diags } - return transformDevelopmentMode(b) + return transformDevelopmentMode(ctx, b) case config.Production: isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName) return validateProductionMode(ctx, b, isPrincipal) diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 17f838160..583efcfe5 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -301,3 +301,23 @@ func TestAllResourcesRenamed(t *testing.T) { } } } + +func TestDisableLocking(t *testing.T) { + ctx := context.Background() + b := mockBundle(config.Development) + + err := transformDevelopmentMode(ctx, b) + require.Nil(t, err) + assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled()) +} + +func TestDisableLockingDisabled(t *testing.T) { + ctx := context.Background() + b := mockBundle(config.Development) + explicitlyEnabled := true + b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled + + err := 
transformDevelopmentMode(ctx, b) + require.Nil(t, err) + assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled") +} diff --git a/bundle/config/root.go b/bundle/config/root.go index 18b548d64..70ca14ea5 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -143,6 +143,18 @@ func (r *Root) updateWithDynamicValue(nv dyn.Value) error { return nil } +// Mutate applies a transformation to the dynamic configuration value of a Root object. +// +// Parameters: +// - fn: A function that mutates a dyn.Value object +// +// Example usage, setting bundle.deployment.lock.enabled to false: +// +// err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { +// return dyn.Map(v, "bundle.deployment.lock", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { +// return dyn.Set(v, "enabled", dyn.V(false)) +// }) +// }) func (r *Root) Mutate(fn func(dyn.Value) (dyn.Value, error)) error { err := r.initializeDynamicValue() if err != nil { From 542156c30b043c8689a44a20621f53d035d0c97f Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 18 Apr 2024 11:56:16 +0200 Subject: [PATCH 142/286] Resolve variable references inside variable lookup fields (#1368) ## Changes Allows for the syntax below ``` variables: service_principal_app_id: description: 'The app id of the service principal for running workflows as.' lookup: service_principal: "sp-${bundle.environment}" ``` Fixes #1259 ## Tests Added regression test --- .../resolve_resource_references_test.go | 61 +++++++++++++ .../mutator/resolve_variable_references.go | 90 +++++++++++++------ bundle/phases/initialize.go | 1 + 3 files changed, 127 insertions(+), 25 deletions(-) diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index 16934ff38..60636bcc6 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -133,3 +133,64 @@ func TestResolveServicePrincipal(t *testing.T) { require.NoError(t, diags.Error()) require.Equal(t, "app-1234", *b.Config.Variables["my-sp"].Value) } + +func TestResolveVariableReferencesInVariableLookups(t *testing.T) { + s := func(s string) *string { + return &s + } + + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Target: "dev", + }, + Variables: map[string]*variable.Variable{ + "foo": { + Value: s("bar"), + }, + "lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster-${var.foo}-${bundle.target}", + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + clusterApi := m.GetMockClustersAPI() + clusterApi.EXPECT().GetByClusterName(mock.Anything, "cluster-bar-dev").Return(&compute.ClusterDetails{ + ClusterId: "1234-5678-abcd", + }, nil) + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences())) + require.NoError(t, diags.Error()) + require.Equal(t, "cluster-bar-dev", b.Config.Variables["lookup"].Lookup.Cluster) + require.Equal(t, "1234-5678-abcd", *b.Config.Variables["lookup"].Value) +} + +func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "another_lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster", + }, + }, + "lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster-${var.another_lookup}", + }, + }, + }, + }, + } + + 
m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences())) + require.ErrorContains(t, diags.Error(), "lookup variables cannot contain references to another lookup variables") +} diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index 0738c9bcb..f7fce6c82 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -2,8 +2,10 @@ package mutator import ( "context" + "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/dyn/convert" @@ -13,10 +15,50 @@ import ( type resolveVariableReferences struct { prefixes []string + pattern dyn.Pattern + lookupFn func(dyn.Value, dyn.Path) (dyn.Value, error) } func ResolveVariableReferences(prefixes ...string) bundle.Mutator { - return &resolveVariableReferences{prefixes: prefixes} + return &resolveVariableReferences{prefixes: prefixes, lookupFn: lookup} +} + +func ResolveVariableReferencesInLookup() bundle.Mutator { + return &resolveVariableReferences{prefixes: []string{ + "bundle", + "workspace", + "variables", + }, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("lookup")), lookupFn: lookupForVariables} +} + +func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) { + // Future opportunity: if we lookup this path in both the given root + // and the synthesized root, we know if it was explicitly set or implied to be empty. + // Then we can emit a warning if it was not explicitly set. + return dyn.GetByPath(v, path) +} + +func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { + if path[0].Key() != "variables" { + return lookup(v, path) + } + + varV, err := dyn.GetByPath(v, path[:len(path)-1]) + if err != nil { + return dyn.InvalidValue, err + } + + var vv variable.Variable + err = convert.ToTyped(&vv, varV) + if err != nil { + return dyn.InvalidValue, err + } + + if vv.Lookup != nil && vv.Lookup.String() != "" { + return dyn.InvalidValue, fmt.Errorf("lookup variables cannot contain references to another lookup variables") + } + + return lookup(v, path) } func (*resolveVariableReferences) Name() string { @@ -48,37 +90,35 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) // // This is consistent with the behavior prior to using the dynamic value system. // - // We can ignore the diagnostics return valuebecause we know that the dynamic value + // We can ignore the diagnostics return value because we know that the dynamic value // has already been normalized when it was first loaded from the configuration file. // normalized, _ := convert.Normalize(b.Config, root, convert.IncludeMissingFields) - lookup := func(path dyn.Path) (dyn.Value, error) { - // Future opportunity: if we lookup this path in both the given root - // and the synthesized root, we know if it was explicitly set or implied to be empty. - // Then we can emit a warning if it was not explicitly set. - return dyn.GetByPath(normalized, path) - } - // Resolve variable references in all values. - root, err := dynvar.Resolve(root, func(path dyn.Path) (dyn.Value, error) { - // Rewrite the shorthand path ${var.foo} into ${variables.foo.value}. 
- if path.HasPrefix(varPath) && len(path) == 2 { - path = dyn.NewPath( - dyn.Key("variables"), - path[1], - dyn.Key("value"), - ) - } - - // Perform resolution only if the path starts with one of the specified prefixes. - for _, prefix := range prefixes { - if path.HasPrefix(prefix) { - return lookup(path) + // If the pattern is nil, we resolve references in the entire configuration. + root, err := dyn.MapByPattern(root, m.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + // Resolve variable references in all values. + return dynvar.Resolve(v, func(path dyn.Path) (dyn.Value, error) { + // Rewrite the shorthand path ${var.foo} into ${variables.foo.value}. + if path.HasPrefix(varPath) && len(path) == 2 { + path = dyn.NewPath( + dyn.Key("variables"), + path[1], + dyn.Key("value"), + ) } - } - return dyn.InvalidValue, dynvar.ErrSkipResolution + // Perform resolution only if the path starts with one of the specified prefixes. + for _, prefix := range prefixes { + if path.HasPrefix(prefix) { + return m.lookupFn(normalized, path) + } + } + + return dyn.InvalidValue, dynvar.ErrSkipResolution + }) }) + if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 6761ffabc..d6a1b95da 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -28,6 +28,7 @@ func Initialize() bundle.Mutator { mutator.ExpandWorkspaceRoot(), mutator.DefineDefaultWorkspacePaths(), mutator.SetVariables(), + mutator.ResolveVariableReferencesInLookup(), mutator.ResolveResourceReferences(), mutator.ResolveVariableReferences( "bundle", From eb9665d2ee2bb553af611b903de0e0ad90b7989e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 18 Apr 2024 17:25:42 +0530 Subject: [PATCH 143/286] Add better documentation for the `auth login` command (#1366) This PR improves the documentation for the `auth login` command, accounting for the various ways this command can be used in. --------- Co-authored-by: PaulCornellDB Co-authored-by: Pieter Noordhuis --- cmd/auth/auth.go | 6 +++--- cmd/auth/login.go | 47 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 49 insertions(+), 4 deletions(-) diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index 4af2a7a71..79e1063b1 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -16,9 +16,9 @@ func New() *cobra.Command { authentication for the Databricks CLI and SDKs work please refer to the documentation linked below. 
-AWS: https://docs.databricks.com/en/dev-tools/auth/index.html -Azure: https://learn.microsoft.com/en-us/azure/databricks/dev-tools/auth -GCP: https://docs.gcp.databricks.com/en/dev-tools/auth/index.html`, +AWS: https://docs.databricks.com/dev-tools/auth/index.html +Azure: https://learn.microsoft.com/azure/databricks/dev-tools/auth +GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`, } var perisistentAuth auth.PersistentAuth diff --git a/cmd/auth/login.go b/cmd/auth/login.go index b0bc7a853..c033054b8 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "runtime" "time" "github.com/databricks/cli/libs/auth" @@ -32,9 +33,53 @@ func configureHost(ctx context.Context, persistentAuth *auth.PersistentAuth, arg const minimalDbConnectVersion = "13.1" func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { + defaultConfigPath := "~/.databrickscfg" + if runtime.GOOS == "windows" { + defaultConfigPath = "%USERPROFILE%\\.databrickscfg" + } cmd := &cobra.Command{ Use: "login [HOST]", - Short: "Authenticate this machine", + Short: "Log into a Databricks workspace or account", + Long: fmt.Sprintf(`Log into a Databricks workspace or account. +This command logs you into the Databricks workspace or account and saves +the authentication configuration in a profile (in %s by default). + +This profile can then be used to authenticate other Databricks CLI commands by +specifying the --profile flag. This profile can also be used to authenticate +other Databricks tooling that supports the Databricks Unified Authentication +Specification. This includes the Databricks Go, Python, and Java SDKs. For more information, +you can refer to the documentation linked below. + AWS: https://docs.databricks.com/dev-tools/auth/index.html + Azure: https://learn.microsoft.com/azure/databricks/dev-tools/auth + GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html + + +This command requires a Databricks Host URL (using --host or as a positional argument +or implicitly inferred from the specified profile name) +and a profile name (using --profile) to be specified. If you don't specify these +values, you'll be prompted for values at runtime. + +While this command always logs you into the specified host, the runtime behaviour +depends on the existing profiles you have set in your configuration file +(at %s by default). + +1. If a profile with the specified name exists and specifies a host, you'll + be logged into the host specified by the profile. The profile will be updated + to use "databricks-cli" as the auth type if that was not the case before. + +2. If a profile with the specified name exists but does not specify a host, + you'll be prompted to specify a host. The profile will be updated to use the + specified host. The auth type will be updated to "databricks-cli" if that was + not the case before. + +3. If a profile with the specified name exists and specifies a host, but you + specify a host using --host (or as the [HOST] positional arg), the profile will + be updated to use the newly specified host. The auth type will be updated to + "databricks-cli" if that was not the case before. + +4. If a profile with the specified name does not exist, a new profile will be + created with the specified host. The auth type will be set to "databricks-cli". 
+`, defaultConfigPath, defaultConfigPath), } var loginTimeout time.Duration From 27f51c760f3c2893aff0db25364cc2558385395c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 18 Apr 2024 17:13:16 +0200 Subject: [PATCH 144/286] Added validate mutator to surface additional bundle warnings (#1352) ## Changes All these validators will return warnings as part of `bundle validate` run Added 2 mutators: 1. To check that if tasks use job_cluster_key it is actually defined 2. To check if there are any files to sync as part of deployment Also added `bundle.Parallel` to run them in parallel To make sure mutators under bundle.Parallel do not mutate config, introduced new `ReadOnlyMutator`, `ReadOnlyBundle` and `ReadOnlyConfig`. Example ``` databricks bundle validate -p deco-staging Warning: unknown field: new_cluster at resources.jobs.my_job in bundle.yml:24:7 Warning: job_cluster_key high_cpu_workload_job_cluster is not defined at resources.jobs.my_job.tasks[0].job_cluster_key in bundle.yml:35:28 Warning: There are no files to sync, please check your your .gitignore and sync.exclude configuration at sync.exclude in bundle.yml:18:5 Name: test Target: default Workspace: Host: https://acme.databricks.com User: andrew.nester@databricks.com Path: /Users/andrew.nester@databricks.com/.bundle/test/default Found 3 warnings ``` ## Tests Added unit tests --- bundle/bundle_read_only.go | 36 +++++++ bundle/config/root.go | 2 +- bundle/config/validate/files_to_sync.go | 54 +++++++++++ .../validate/job_cluster_key_defined.go | 53 ++++++++++ .../validate/job_cluster_key_defined_test.go | 97 +++++++++++++++++++ bundle/config/validate/validate.go | 43 ++++++++ .../config/validate/validate_sync_patterns.go | 79 +++++++++++++++ bundle/deploy/files/delete.go | 2 +- bundle/deploy/files/sync.go | 24 ++--- bundle/deploy/files/upload.go | 2 +- bundle/deploy/state_pull.go | 2 +- bundle/deploy/state_pull_test.go | 4 +- bundle/deploy/state_update.go | 2 +- bundle/mutator_read_only.go | 29 ++++++ bundle/parallel.go | 43 ++++++++ bundle/parallel_test.go | 73 ++++++++++++++ bundle/tests/job_cluster_key/databricks.yml | 27 ++++++ bundle/tests/job_cluster_key_test.go | 28 ++++++ .../sync_include_exclude_no_matches_test.go | 39 ++++++++ cmd/bundle/sync.go | 2 +- cmd/bundle/validate.go | 2 + cmd/sync/sync.go | 2 +- 22 files changed, 624 insertions(+), 21 deletions(-) create mode 100644 bundle/bundle_read_only.go create mode 100644 bundle/config/validate/files_to_sync.go create mode 100644 bundle/config/validate/job_cluster_key_defined.go create mode 100644 bundle/config/validate/job_cluster_key_defined_test.go create mode 100644 bundle/config/validate/validate.go create mode 100644 bundle/config/validate/validate_sync_patterns.go create mode 100644 bundle/mutator_read_only.go create mode 100644 bundle/parallel.go create mode 100644 bundle/parallel_test.go create mode 100644 bundle/tests/job_cluster_key/databricks.yml create mode 100644 bundle/tests/job_cluster_key_test.go create mode 100644 bundle/tests/sync_include_exclude_no_matches_test.go diff --git a/bundle/bundle_read_only.go b/bundle/bundle_read_only.go new file mode 100644 index 000000000..e4a4f9936 --- /dev/null +++ b/bundle/bundle_read_only.go @@ -0,0 +1,36 @@ +package bundle + +import ( + "context" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/databricks-sdk-go" +) + +type ReadOnlyBundle struct { + b *Bundle +} + +func ReadOnly(b *Bundle) ReadOnlyBundle { + return ReadOnlyBundle{b: b} +} + +func (r ReadOnlyBundle) Config() config.Root { + return 
r.b.Config +} + +func (r ReadOnlyBundle) RootPath() string { + return r.b.RootPath +} + +func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient { + return r.b.WorkspaceClient() +} + +func (r ReadOnlyBundle) CacheDir(ctx context.Context, paths ...string) (string, error) { + return r.b.CacheDir(ctx, paths...) +} + +func (r ReadOnlyBundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { + return r.b.GetSyncIncludePatterns(ctx) +} diff --git a/bundle/config/root.go b/bundle/config/root.go index 70ca14ea5..17f2747ef 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -452,7 +452,7 @@ func validateVariableOverrides(root, target dyn.Value) (err error) { // Best effort to get the location of configuration value at the specified path. // This function is useful to annotate error messages with the location, because // we don't want to fail with a different error message if we cannot retrieve the location. -func (r *Root) GetLocation(path string) dyn.Location { +func (r Root) GetLocation(path string) dyn.Location { v, err := dyn.Get(r.value, path) if err != nil { return dyn.Location{} diff --git a/bundle/config/validate/files_to_sync.go b/bundle/config/validate/files_to_sync.go new file mode 100644 index 000000000..d53e38243 --- /dev/null +++ b/bundle/config/validate/files_to_sync.go @@ -0,0 +1,54 @@ +package validate + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" + "github.com/databricks/cli/libs/diag" +) + +func FilesToSync() bundle.ReadOnlyMutator { + return &filesToSync{} +} + +type filesToSync struct { +} + +func (v *filesToSync) Name() string { + return "validate:files_to_sync" +} + +func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + sync, err := files.GetSync(ctx, rb) + if err != nil { + return diag.FromErr(err) + } + + fl, err := sync.GetFileList(ctx) + if err != nil { + return diag.FromErr(err) + } + + if len(fl) != 0 { + return nil + } + + diags := diag.Diagnostics{} + if len(rb.Config().Sync.Exclude) == 0 { + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: "There are no files to sync, please check your .gitignore", + }) + } else { + loc := location{path: "sync.exclude", rb: rb} + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: "There are no files to sync, please check your .gitignore and sync.exclude configuration", + Location: loc.Location(), + Path: loc.Path(), + }) + } + + return diags +} diff --git a/bundle/config/validate/job_cluster_key_defined.go b/bundle/config/validate/job_cluster_key_defined.go new file mode 100644 index 000000000..37ed3f417 --- /dev/null +++ b/bundle/config/validate/job_cluster_key_defined.go @@ -0,0 +1,53 @@ +package validate + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" +) + +func JobClusterKeyDefined() bundle.ReadOnlyMutator { + return &jobClusterKeyDefined{} +} + +type jobClusterKeyDefined struct { +} + +func (v *jobClusterKeyDefined) Name() string { + return "validate:job_cluster_key_defined" +} + +func (v *jobClusterKeyDefined) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + diags := diag.Diagnostics{} + + for k, job := range rb.Config().Resources.Jobs { + jobClusterKeys := make(map[string]bool) + for _, cluster := range job.JobClusters { + if cluster.JobClusterKey != "" { + jobClusterKeys[cluster.JobClusterKey] = true + } + } + + for index, task := range 
job.Tasks { + if task.JobClusterKey != "" { + if _, ok := jobClusterKeys[task.JobClusterKey]; !ok { + loc := location{ + path: fmt.Sprintf("resources.jobs.%s.tasks[%d].job_cluster_key", k, index), + rb: rb, + } + + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("job_cluster_key %s is not defined", task.JobClusterKey), + Location: loc.Location(), + Path: loc.Path(), + }) + } + } + } + } + + return diags +} diff --git a/bundle/config/validate/job_cluster_key_defined_test.go b/bundle/config/validate/job_cluster_key_defined_test.go new file mode 100644 index 000000000..176b0fedc --- /dev/null +++ b/bundle/config/validate/job_cluster_key_defined_test.go @@ -0,0 +1,97 @@ +package validate + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestJobClusterKeyDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Name: "job1", + JobClusters: []jobs.JobCluster{ + {JobClusterKey: "do-not-exist"}, + }, + Tasks: []jobs.Task{ + {JobClusterKey: "do-not-exist"}, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) + require.Len(t, diags, 0) + require.NoError(t, diags.Error()) +} + +func TestJobClusterKeyNotDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Name: "job1", + Tasks: []jobs.Task{ + {JobClusterKey: "do-not-exist"}, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) + require.Len(t, diags, 1) + require.NoError(t, diags.Error()) + require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined") +} + +func TestJobClusterKeyDefinedInDifferentJob(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Name: "job1", + Tasks: []jobs.Task{ + {JobClusterKey: "do-not-exist"}, + }, + }, + }, + "job2": { + JobSettings: &jobs.JobSettings{ + Name: "job2", + JobClusters: []jobs.JobCluster{ + {JobClusterKey: "do-not-exist"}, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) + require.Len(t, diags, 1) + require.NoError(t, diags.Error()) + require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined") +} diff --git a/bundle/config/validate/validate.go b/bundle/config/validate/validate.go new file mode 100644 index 000000000..af7e984a1 --- /dev/null +++ b/bundle/config/validate/validate.go @@ -0,0 +1,43 @@ +package validate + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type validate struct { +} + +type location struct { + path string + rb bundle.ReadOnlyBundle +} + +func (l location) Location() dyn.Location { + return l.rb.Config().GetLocation(l.path) +} 
+ +func (l location) Path() dyn.Path { + return dyn.MustPathFromString(l.path) +} + +// Apply implements bundle.Mutator. +func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + return bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), bundle.Parallel( + JobClusterKeyDefined(), + FilesToSync(), + ValidateSyncPatterns(), + )) +} + +// Name implements bundle.Mutator. +func (v *validate) Name() string { + return "validate" +} + +func Validate() bundle.Mutator { + return &validate{} +} diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go new file mode 100644 index 000000000..58acf6ae4 --- /dev/null +++ b/bundle/config/validate/validate_sync_patterns.go @@ -0,0 +1,79 @@ +package validate + +import ( + "context" + "fmt" + "sync" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/fileset" + "golang.org/x/sync/errgroup" +) + +func ValidateSyncPatterns() bundle.ReadOnlyMutator { + return &validateSyncPatterns{} +} + +type validateSyncPatterns struct { +} + +func (v *validateSyncPatterns) Name() string { + return "validate:validate_sync_patterns" +} + +func (v *validateSyncPatterns) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + s := rb.Config().Sync + if len(s.Exclude) == 0 && len(s.Include) == 0 { + return nil + } + + diags, err := checkPatterns(s.Exclude, "sync.exclude", rb) + if err != nil { + return diag.FromErr(err) + } + + includeDiags, err := checkPatterns(s.Include, "sync.include", rb) + if err != nil { + return diag.FromErr(err) + } + + return diags.Extend(includeDiags) +} + +func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (diag.Diagnostics, error) { + var mu sync.Mutex + var errs errgroup.Group + var diags diag.Diagnostics + + for i, pattern := range patterns { + index := i + p := pattern + errs.Go(func() error { + fs, err := fileset.NewGlobSet(rb.RootPath(), []string{p}) + if err != nil { + return err + } + + all, err := fs.All() + if err != nil { + return err + } + + if len(all) == 0 { + loc := location{path: fmt.Sprintf("%s[%d]", path, index), rb: rb} + mu.Lock() + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("Pattern %s does not match any files", p), + Location: loc.Location(), + Path: loc.Path(), + }) + mu.Unlock() + } + return nil + }) + } + + return diags, errs.Wait() +} diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go index 9367e2a62..46c554463 100644 --- a/bundle/deploy/files/delete.go +++ b/bundle/deploy/files/delete.go @@ -46,7 +46,7 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { } // Clean up sync snapshot file - sync, err := GetSync(ctx, b) + sync, err := GetSync(ctx, bundle.ReadOnly(b)) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index e8c54c633..d78ab2d74 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -8,40 +8,40 @@ import ( "github.com/databricks/cli/libs/sync" ) -func GetSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) { - opts, err := GetSyncOptions(ctx, b) +func GetSync(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.Sync, error) { + opts, err := GetSyncOptions(ctx, rb) if err != nil { return nil, fmt.Errorf("cannot get sync options: %w", err) } return sync.New(ctx, *opts) } -func GetSyncOptions(ctx context.Context, b *bundle.Bundle) 
(*sync.SyncOptions, error) { - cacheDir, err := b.CacheDir(ctx) +func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOptions, error) { + cacheDir, err := rb.CacheDir(ctx) if err != nil { return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) } - includes, err := b.GetSyncIncludePatterns(ctx) + includes, err := rb.GetSyncIncludePatterns(ctx) if err != nil { return nil, fmt.Errorf("cannot get list of sync includes: %w", err) } opts := &sync.SyncOptions{ - LocalPath: b.RootPath, - RemotePath: b.Config.Workspace.FilePath, + LocalPath: rb.RootPath(), + RemotePath: rb.Config().Workspace.FilePath, Include: includes, - Exclude: b.Config.Sync.Exclude, - Host: b.WorkspaceClient().Config.Host, + Exclude: rb.Config().Sync.Exclude, + Host: rb.WorkspaceClient().Config.Host, Full: false, SnapshotBasePath: cacheDir, - WorkspaceClient: b.WorkspaceClient(), + WorkspaceClient: rb.WorkspaceClient(), } - if b.Config.Workspace.CurrentUser != nil { - opts.CurrentUser = b.Config.Workspace.CurrentUser.User + if rb.Config().Workspace.CurrentUser != nil { + opts.CurrentUser = rb.Config().Workspace.CurrentUser.User } return opts, nil diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index 58cb3c0f0..fa20ed4ea 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -18,7 +18,7 @@ func (m *upload) Name() string { func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath)) - sync, err := GetSync(ctx, b) + sync, err := GetSync(ctx, bundle.ReadOnly(b)) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index bae457ea0..57b38ec6c 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -79,7 +79,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic } // Create a new snapshot based on the deployment state file. - opts, err := files.GetSyncOptions(ctx, b) + opts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b)) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index 80acb254f..ca4834731 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -85,7 +85,7 @@ func testStatePull(t *testing.T, opts statePullOpts) { } if opts.withExistingSnapshot { - opts, err := files.GetSyncOptions(ctx, b) + opts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b)) require.NoError(t, err) snapshotPath, err := sync.SnapshotPath(opts) @@ -127,7 +127,7 @@ func testStatePull(t *testing.T, opts statePullOpts) { } if opts.expects.snapshotState != nil { - syncOpts, err := files.GetSyncOptions(ctx, b) + syncOpts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b)) require.NoError(t, err) snapshotPath, err := sync.SnapshotPath(syncOpts) diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index cf2e9ac9e..885e47a7a 100644 --- a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -39,7 +39,7 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost state.Version = DeploymentStateVersion // Get the current file list. 
- sync, err := files.GetSync(ctx, b) + sync, err := files.GetSync(ctx, bundle.ReadOnly(b)) if err != nil { return diag.FromErr(err) } diff --git a/bundle/mutator_read_only.go b/bundle/mutator_read_only.go new file mode 100644 index 000000000..ee4e36e0f --- /dev/null +++ b/bundle/mutator_read_only.go @@ -0,0 +1,29 @@ +package bundle + +import ( + "context" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/log" +) + +// ReadOnlyMutator is the interface type that allows access to bundle configuration but does not allow any mutations. +type ReadOnlyMutator interface { + // Name returns the mutators name. + Name() string + + // Apply access the specified read-only bundle object. + Apply(context.Context, ReadOnlyBundle) diag.Diagnostics +} + +func ApplyReadOnly(ctx context.Context, rb ReadOnlyBundle, m ReadOnlyMutator) diag.Diagnostics { + ctx = log.NewContext(ctx, log.GetLogger(ctx).With("mutator (read-only)", m.Name())) + + log.Debugf(ctx, "ApplyReadOnly") + diags := m.Apply(ctx, rb) + if err := diags.Error(); err != nil { + log.Errorf(ctx, "Error: %s", err) + } + + return diags +} diff --git a/bundle/parallel.go b/bundle/parallel.go new file mode 100644 index 000000000..ebb91661a --- /dev/null +++ b/bundle/parallel.go @@ -0,0 +1,43 @@ +package bundle + +import ( + "context" + "sync" + + "github.com/databricks/cli/libs/diag" +) + +type parallel struct { + mutators []ReadOnlyMutator +} + +func (m *parallel) Name() string { + return "parallel" +} + +func (m *parallel) Apply(ctx context.Context, rb ReadOnlyBundle) diag.Diagnostics { + var wg sync.WaitGroup + var mu sync.Mutex + var diags diag.Diagnostics + + wg.Add(len(m.mutators)) + for _, mutator := range m.mutators { + go func(mutator ReadOnlyMutator) { + defer wg.Done() + d := ApplyReadOnly(ctx, rb, mutator) + + mu.Lock() + diags = diags.Extend(d) + mu.Unlock() + }(mutator) + } + wg.Wait() + return diags +} + +// Parallel runs the given mutators in parallel. 
+func Parallel(mutators ...ReadOnlyMutator) ReadOnlyMutator { + return ¶llel{ + mutators: mutators, + } +} diff --git a/bundle/parallel_test.go b/bundle/parallel_test.go new file mode 100644 index 000000000..be1e33637 --- /dev/null +++ b/bundle/parallel_test.go @@ -0,0 +1,73 @@ +package bundle + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/stretchr/testify/require" +) + +type addToContainer struct { + container *[]int + value int + err bool +} + +func (m *addToContainer) Apply(ctx context.Context, b ReadOnlyBundle) diag.Diagnostics { + if m.err { + return diag.Errorf("error") + } + + c := *m.container + c = append(c, m.value) + *m.container = c + return nil +} + +func (m *addToContainer) Name() string { + return "addToContainer" +} + +func TestParallelMutatorWork(t *testing.T) { + b := &Bundle{ + Config: config.Root{}, + } + + container := []int{} + m1 := &addToContainer{container: &container, value: 1} + m2 := &addToContainer{container: &container, value: 2} + m3 := &addToContainer{container: &container, value: 3} + + m := Parallel(m1, m2, m3) + + // Apply the mutator + diags := ApplyReadOnly(context.Background(), ReadOnly(b), m) + require.Empty(t, diags) + require.Len(t, container, 3) + require.Contains(t, container, 1) + require.Contains(t, container, 2) + require.Contains(t, container, 3) +} + +func TestParallelMutatorWorkWithErrors(t *testing.T) { + b := &Bundle{ + Config: config.Root{}, + } + + container := []int{} + m1 := &addToContainer{container: &container, value: 1} + m2 := &addToContainer{container: &container, err: true, value: 2} + m3 := &addToContainer{container: &container, value: 3} + + m := Parallel(m1, m2, m3) + + // Apply the mutator + diags := ApplyReadOnly(context.Background(), ReadOnly(b), m) + require.Len(t, diags, 1) + require.Equal(t, "error", diags[0].Summary) + require.Len(t, container, 2) + require.Contains(t, container, 1) + require.Contains(t, container, 3) +} diff --git a/bundle/tests/job_cluster_key/databricks.yml b/bundle/tests/job_cluster_key/databricks.yml new file mode 100644 index 000000000..bd863db3e --- /dev/null +++ b/bundle/tests/job_cluster_key/databricks.yml @@ -0,0 +1,27 @@ +bundle: + name: job_cluster_key + +workspace: + host: https://acme.cloud.databricks.com/ + +targets: + default: + resources: + jobs: + foo: + name: job + tasks: + - task_key: test + job_cluster_key: key + development: + resources: + jobs: + foo: + job_clusters: + - job_cluster_key: key + new_cluster: + node_type_id: i3.xlarge + num_workers: 1 + tasks: + - task_key: test + job_cluster_key: key diff --git a/bundle/tests/job_cluster_key_test.go b/bundle/tests/job_cluster_key_test.go new file mode 100644 index 000000000..5a8b368e5 --- /dev/null +++ b/bundle/tests/job_cluster_key_test.go @@ -0,0 +1,28 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" + "github.com/databricks/cli/libs/diag" + "github.com/stretchr/testify/require" +) + +func TestJobClusterKeyNotDefinedTest(t *testing.T) { + b := loadTarget(t, "./job_cluster_key", "default") + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined()) + require.Len(t, diags, 1) + require.NoError(t, diags.Error()) + require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diags[0].Summary, "job_cluster_key key is not defined") +} + +func TestJobClusterKeyDefinedTest(t *testing.T) { + b 
:= loadTarget(t, "./job_cluster_key", "development") + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined()) + require.Len(t, diags, 0) +} diff --git a/bundle/tests/sync_include_exclude_no_matches_test.go b/bundle/tests/sync_include_exclude_no_matches_test.go new file mode 100644 index 000000000..135e2faac --- /dev/null +++ b/bundle/tests/sync_include_exclude_no_matches_test.go @@ -0,0 +1,39 @@ +package config_tests + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" + "github.com/databricks/cli/libs/diag" + "github.com/stretchr/testify/require" +) + +func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) { + b := loadTarget(t, "./override_sync", "development") + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.ValidateSyncPatterns()) + require.Len(t, diags, 3) + require.NoError(t, diags.Error()) + + require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diags[0].Summary, "Pattern dist does not match any files") + require.Equal(t, diags[0].Location.File, filepath.Join("override_sync", "databricks.yml")) + require.Equal(t, diags[0].Location.Line, 17) + require.Equal(t, diags[0].Location.Column, 11) + require.Equal(t, diags[0].Path.String(), "sync.exclude[0]") + + summaries := []string{ + fmt.Sprintf("Pattern %s does not match any files", filepath.Join("src", "*")), + fmt.Sprintf("Pattern %s does not match any files", filepath.Join("tests", "*")), + } + + require.Equal(t, diags[1].Severity, diag.Warning) + require.Contains(t, summaries, diags[1].Summary) + + require.Equal(t, diags[2].Severity, diag.Warning) + require.Contains(t, summaries, diags[2].Summary) +} diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index 0818aecf7..72ad8eb3a 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -21,7 +21,7 @@ type syncFlags struct { } func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) { - opts, err := files.GetSyncOptions(cmd.Context(), b) + opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b)) if err != nil { return nil, fmt.Errorf("cannot get sync options: %w", err) } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 4a04db409..0472df479 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -8,6 +8,7 @@ import ( "text/template" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" @@ -140,6 +141,7 @@ func newValidateCommand() *cobra.Command { } diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize())) + diags = diags.Extend(bundle.Apply(ctx, b, validate.Validate())) if err := diags.Error(); err != nil { return err } diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 6899d6fe1..42550722b 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -30,7 +30,7 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b * return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle") } - opts, err := files.GetSyncOptions(cmd.Context(), b) + opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b)) if err != nil { return nil, fmt.Errorf("cannot get sync options: %w", err) } From 6b81b627fe10f6d13bf64f6eca4a0901e1a9cad3 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 18 
Apr 2024 22:20:01 +0200 Subject: [PATCH 145/286] Upgrade terraform-provider-databricks to 1.40.0 (#1376) ## Changes Upgrade terraform-provider-databricks to 1.40.0 --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../schema/data_source_external_location.go | 36 +++++++++++++++++++ .../schema/data_source_external_locations.go | 8 +++++ .../tf/schema/data_source_instance_pool.go | 1 + bundle/internal/tf/schema/data_source_job.go | 28 ++++++++------- .../tf/schema/data_source_metastore.go | 4 ++- bundle/internal/tf/schema/data_sources.go | 4 +++ bundle/internal/tf/schema/resource_catalog.go | 27 +++++++------- bundle/internal/tf/schema/resource_cluster.go | 5 +++ .../tf/schema/resource_instance_pool.go | 1 + bundle/internal/tf/schema/resource_job.go | 28 ++++++++------- .../tf/schema/resource_lakehouse_monitor.go | 35 ++++++++++-------- .../internal/tf/schema/resource_pipeline.go | 6 ++++ .../internal/tf/schema/resource_recipient.go | 34 ++++++++++++------ bundle/internal/tf/schema/resource_schema.go | 19 +++++----- bundle/internal/tf/schema/root.go | 2 +- 16 files changed, 166 insertions(+), 74 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_external_location.go create mode 100644 bundle/internal/tf/schema/data_source_external_locations.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 7780510ea..4fb4bf2c5 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.39.0" +const ProviderVersion = "1.40.0" diff --git a/bundle/internal/tf/schema/data_source_external_location.go b/bundle/internal/tf/schema/data_source_external_location.go new file mode 100644 index 000000000..0fea6e529 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_external_location.go @@ -0,0 +1,36 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceExternalLocationExternalLocationInfoEncryptionDetailsSseEncryptionDetails struct { + Algorithm string `json:"algorithm,omitempty"` + AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"` +} + +type DataSourceExternalLocationExternalLocationInfoEncryptionDetails struct { + SseEncryptionDetails *DataSourceExternalLocationExternalLocationInfoEncryptionDetailsSseEncryptionDetails `json:"sse_encryption_details,omitempty"` +} + +type DataSourceExternalLocationExternalLocationInfo struct { + AccessPoint string `json:"access_point,omitempty"` + BrowseOnly bool `json:"browse_only,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + CredentialId string `json:"credential_id,omitempty"` + CredentialName string `json:"credential_name,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + Url string `json:"url,omitempty"` + EncryptionDetails *DataSourceExternalLocationExternalLocationInfoEncryptionDetails `json:"encryption_details,omitempty"` +} + +type DataSourceExternalLocation struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + ExternalLocationInfo *DataSourceExternalLocationExternalLocationInfo `json:"external_location_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_external_locations.go b/bundle/internal/tf/schema/data_source_external_locations.go new file mode 100644 index 000000000..05b7b59c3 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_external_locations.go @@ -0,0 +1,8 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceExternalLocations struct { + Id string `json:"id,omitempty"` + Names []string `json:"names,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_instance_pool.go b/bundle/internal/tf/schema/data_source_instance_pool.go index 240083d64..796d764b7 100644 --- a/bundle/internal/tf/schema/data_source_instance_pool.go +++ b/bundle/internal/tf/schema/data_source_instance_pool.go @@ -27,6 +27,7 @@ type DataSourceInstancePoolPoolInfoDiskSpec struct { type DataSourceInstancePoolPoolInfoGcpAttributes struct { GcpAvailability string `json:"gcp_availability,omitempty"` LocalSsdCount int `json:"local_ssd_count,omitempty"` + ZoneId string `json:"zone_id,omitempty"` } type DataSourceInstancePoolPoolInfoInstancePoolFleetAttributesFleetOnDemandOption struct { diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index 6ce02b0d1..dbd29f4ba 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -2,15 +2,6 @@ package schema -type DataSourceJobJobSettingsSettingsComputeSpec struct { - Kind string `json:"kind,omitempty"` -} - -type DataSourceJobJobSettingsSettingsCompute struct { - ComputeKey string `json:"compute_key,omitempty"` - Spec *DataSourceJobJobSettingsSettingsComputeSpec `json:"spec,omitempty"` -} - type DataSourceJobJobSettingsSettingsContinuous struct { PauseStatus string `json:"pause_status,omitempty"` } @@ -38,6 +29,16 @@ type DataSourceJobJobSettingsSettingsEmailNotifications struct { OnSuccess []string `json:"on_success,omitempty"` } +type DataSourceJobJobSettingsSettingsEnvironmentSpec struct { + Client string `json:"client"` + Dependencies []string `json:"dependencies,omitempty"` +} + +type DataSourceJobJobSettingsSettingsEnvironment struct { + EnvironmentKey string `json:"environment_key"` + Spec *DataSourceJobJobSettingsSettingsEnvironmentSpec `json:"spec,omitempty"` +} + type DataSourceJobJobSettingsSettingsGitSourceJobSource struct { DirtyState string `json:"dirty_state,omitempty"` ImportFromGitBranch string `json:"import_from_git_branch"` @@ -411,6 +412,7 @@ type DataSourceJobJobSettingsSettingsNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type DataSourceJobJobSettingsSettingsNotificationSettings struct { @@ -725,6 +727,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotificationSettings struct { @@ -831,8 +834,8 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications str } type DataSourceJobJobSettingsSettingsTaskForEachTaskTask struct { - ComputeKey string `json:"compute_key,omitempty"` Description string `json:"description,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"` MaxRetries int `json:"max_retries,omitempty"` @@ -1062,6 +1065,7 @@ type DataSourceJobJobSettingsSettingsTaskNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string 
`json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type DataSourceJobJobSettingsSettingsTaskNotificationSettings struct { @@ -1168,8 +1172,8 @@ type DataSourceJobJobSettingsSettingsTaskWebhookNotifications struct { } type DataSourceJobJobSettingsSettingsTask struct { - ComputeKey string `json:"compute_key,omitempty"` Description string `json:"description,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"` MaxRetries int `json:"max_retries,omitempty"` @@ -1252,11 +1256,11 @@ type DataSourceJobJobSettingsSettings struct { RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` Tags map[string]string `json:"tags,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"` - Compute []DataSourceJobJobSettingsSettingsCompute `json:"compute,omitempty"` Continuous *DataSourceJobJobSettingsSettingsContinuous `json:"continuous,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsDbtTask `json:"dbt_task,omitempty"` Deployment *DataSourceJobJobSettingsSettingsDeployment `json:"deployment,omitempty"` EmailNotifications *DataSourceJobJobSettingsSettingsEmailNotifications `json:"email_notifications,omitempty"` + Environment []DataSourceJobJobSettingsSettingsEnvironment `json:"environment,omitempty"` GitSource *DataSourceJobJobSettingsSettingsGitSource `json:"git_source,omitempty"` Health *DataSourceJobJobSettingsSettingsHealth `json:"health,omitempty"` JobCluster []DataSourceJobJobSettingsSettingsJobCluster `json:"job_cluster,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_metastore.go b/bundle/internal/tf/schema/data_source_metastore.go index dd14be81c..ce2064794 100644 --- a/bundle/internal/tf/schema/data_source_metastore.go +++ b/bundle/internal/tf/schema/data_source_metastore.go @@ -25,6 +25,8 @@ type DataSourceMetastoreMetastoreInfo struct { type DataSourceMetastore struct { Id string `json:"id,omitempty"` - MetastoreId string `json:"metastore_id"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Region string `json:"region,omitempty"` MetastoreInfo *DataSourceMetastoreMetastoreInfo `json:"metastore_info,omitempty"` } diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 698cbec93..2e02c4388 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -17,6 +17,8 @@ type DataSources struct { DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"` Directory map[string]any `json:"databricks_directory,omitempty"` + ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"` Group map[string]any `json:"databricks_group,omitempty"` InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` @@ -64,6 +66,8 @@ func NewDataSources() *DataSources { DbfsFile: make(map[string]any), DbfsFilePaths: make(map[string]any), Directory: make(map[string]any), + ExternalLocation: make(map[string]any), + ExternalLocations: make(map[string]any), Group: make(map[string]any), InstancePool: make(map[string]any), InstanceProfiles: make(map[string]any), diff --git 
a/bundle/internal/tf/schema/resource_catalog.go b/bundle/internal/tf/schema/resource_catalog.go index a54f1c270..76c355288 100644 --- a/bundle/internal/tf/schema/resource_catalog.go +++ b/bundle/internal/tf/schema/resource_catalog.go @@ -3,17 +3,18 @@ package schema type ResourceCatalog struct { - Comment string `json:"comment,omitempty"` - ConnectionName string `json:"connection_name,omitempty"` - ForceDestroy bool `json:"force_destroy,omitempty"` - Id string `json:"id,omitempty"` - IsolationMode string `json:"isolation_mode,omitempty"` - MetastoreId string `json:"metastore_id,omitempty"` - Name string `json:"name"` - Options map[string]string `json:"options,omitempty"` - Owner string `json:"owner,omitempty"` - Properties map[string]string `json:"properties,omitempty"` - ProviderName string `json:"provider_name,omitempty"` - ShareName string `json:"share_name,omitempty"` - StorageRoot string `json:"storage_root,omitempty"` + Comment string `json:"comment,omitempty"` + ConnectionName string `json:"connection_name,omitempty"` + EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` + Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Options map[string]string `json:"options,omitempty"` + Owner string `json:"owner,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + ProviderName string `json:"provider_name,omitempty"` + ShareName string `json:"share_name,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 111efe8d5..6f866ba87 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -32,6 +32,10 @@ type ResourceClusterAzureAttributes struct { LogAnalyticsInfo *ResourceClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } +type ResourceClusterCloneFrom struct { + SourceClusterId string `json:"source_cluster_id"` +} + type ResourceClusterClusterLogConfDbfs struct { Destination string `json:"destination"` } @@ -190,6 +194,7 @@ type ResourceCluster struct { Autoscale *ResourceClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"` AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"` + CloneFrom *ResourceClusterCloneFrom `json:"clone_from,omitempty"` ClusterLogConf *ResourceClusterClusterLogConf `json:"cluster_log_conf,omitempty"` ClusterMountInfo []ResourceClusterClusterMountInfo `json:"cluster_mount_info,omitempty"` DockerImage *ResourceClusterDockerImage `json:"docker_image,omitempty"` diff --git a/bundle/internal/tf/schema/resource_instance_pool.go b/bundle/internal/tf/schema/resource_instance_pool.go index f524b3fce..0097a4913 100644 --- a/bundle/internal/tf/schema/resource_instance_pool.go +++ b/bundle/internal/tf/schema/resource_instance_pool.go @@ -27,6 +27,7 @@ type ResourceInstancePoolDiskSpec struct { type ResourceInstancePoolGcpAttributes struct { GcpAvailability string `json:"gcp_availability,omitempty"` LocalSsdCount int `json:"local_ssd_count,omitempty"` + ZoneId string `json:"zone_id,omitempty"` } type ResourceInstancePoolInstancePoolFleetAttributesFleetOnDemandOption struct { diff --git a/bundle/internal/tf/schema/resource_job.go 
b/bundle/internal/tf/schema/resource_job.go index 83e80c9c8..2431262c1 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -2,15 +2,6 @@ package schema -type ResourceJobComputeSpec struct { - Kind string `json:"kind,omitempty"` -} - -type ResourceJobCompute struct { - ComputeKey string `json:"compute_key,omitempty"` - Spec *ResourceJobComputeSpec `json:"spec,omitempty"` -} - type ResourceJobContinuous struct { PauseStatus string `json:"pause_status,omitempty"` } @@ -38,6 +29,16 @@ type ResourceJobEmailNotifications struct { OnSuccess []string `json:"on_success,omitempty"` } +type ResourceJobEnvironmentSpec struct { + Client string `json:"client"` + Dependencies []string `json:"dependencies,omitempty"` +} + +type ResourceJobEnvironment struct { + EnvironmentKey string `json:"environment_key"` + Spec *ResourceJobEnvironmentSpec `json:"spec,omitempty"` +} + type ResourceJobGitSourceJobSource struct { DirtyState string `json:"dirty_state,omitempty"` ImportFromGitBranch string `json:"import_from_git_branch"` @@ -411,6 +412,7 @@ type ResourceJobNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type ResourceJobNotificationSettings struct { @@ -725,6 +727,7 @@ type ResourceJobTaskForEachTaskTaskNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type ResourceJobTaskForEachTaskTaskNotificationSettings struct { @@ -831,8 +834,8 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { } type ResourceJobTaskForEachTaskTask struct { - ComputeKey string `json:"compute_key,omitempty"` Description string `json:"description,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"` MaxRetries int `json:"max_retries,omitempty"` @@ -1062,6 +1065,7 @@ type ResourceJobTaskNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type ResourceJobTaskNotificationSettings struct { @@ -1168,8 +1172,8 @@ type ResourceJobTaskWebhookNotifications struct { } type ResourceJobTask struct { - ComputeKey string `json:"compute_key,omitempty"` Description string `json:"description,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"` MaxRetries int `json:"max_retries,omitempty"` @@ -1256,11 +1260,11 @@ type ResourceJob struct { Tags map[string]string `json:"tags,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"` Url string `json:"url,omitempty"` - Compute []ResourceJobCompute `json:"compute,omitempty"` Continuous *ResourceJobContinuous `json:"continuous,omitempty"` DbtTask *ResourceJobDbtTask `json:"dbt_task,omitempty"` Deployment *ResourceJobDeployment `json:"deployment,omitempty"` EmailNotifications *ResourceJobEmailNotifications `json:"email_notifications,omitempty"` + Environment []ResourceJobEnvironment `json:"environment,omitempty"` GitSource 
*ResourceJobGitSource `json:"git_source,omitempty"` Health *ResourceJobHealth `json:"health,omitempty"` JobCluster []ResourceJobJobCluster `json:"job_cluster,omitempty"` diff --git a/bundle/internal/tf/schema/resource_lakehouse_monitor.go b/bundle/internal/tf/schema/resource_lakehouse_monitor.go index 26196d2f5..69dbdd047 100644 --- a/bundle/internal/tf/schema/resource_lakehouse_monitor.go +++ b/bundle/internal/tf/schema/resource_lakehouse_monitor.go @@ -3,11 +3,11 @@ package schema type ResourceLakehouseMonitorCustomMetrics struct { - Definition string `json:"definition,omitempty"` - InputColumns []string `json:"input_columns,omitempty"` - Name string `json:"name,omitempty"` - OutputDataType string `json:"output_data_type,omitempty"` - Type string `json:"type,omitempty"` + Definition string `json:"definition"` + InputColumns []string `json:"input_columns"` + Name string `json:"name"` + OutputDataType string `json:"output_data_type"` + Type string `json:"type"` } type ResourceLakehouseMonitorDataClassificationConfig struct { @@ -15,35 +15,40 @@ type ResourceLakehouseMonitorDataClassificationConfig struct { } type ResourceLakehouseMonitorInferenceLog struct { - Granularities []string `json:"granularities,omitempty"` + Granularities []string `json:"granularities"` LabelCol string `json:"label_col,omitempty"` - ModelIdCol string `json:"model_id_col,omitempty"` - PredictionCol string `json:"prediction_col,omitempty"` + ModelIdCol string `json:"model_id_col"` + PredictionCol string `json:"prediction_col"` PredictionProbaCol string `json:"prediction_proba_col,omitempty"` - ProblemType string `json:"problem_type,omitempty"` - TimestampCol string `json:"timestamp_col,omitempty"` + ProblemType string `json:"problem_type"` + TimestampCol string `json:"timestamp_col"` } type ResourceLakehouseMonitorNotificationsOnFailure struct { EmailAddresses []string `json:"email_addresses,omitempty"` } +type ResourceLakehouseMonitorNotificationsOnNewClassificationTagDetected struct { + EmailAddresses []string `json:"email_addresses,omitempty"` +} + type ResourceLakehouseMonitorNotifications struct { - OnFailure *ResourceLakehouseMonitorNotificationsOnFailure `json:"on_failure,omitempty"` + OnFailure *ResourceLakehouseMonitorNotificationsOnFailure `json:"on_failure,omitempty"` + OnNewClassificationTagDetected *ResourceLakehouseMonitorNotificationsOnNewClassificationTagDetected `json:"on_new_classification_tag_detected,omitempty"` } type ResourceLakehouseMonitorSchedule struct { PauseStatus string `json:"pause_status,omitempty"` - QuartzCronExpression string `json:"quartz_cron_expression,omitempty"` - TimezoneId string `json:"timezone_id,omitempty"` + QuartzCronExpression string `json:"quartz_cron_expression"` + TimezoneId string `json:"timezone_id"` } type ResourceLakehouseMonitorSnapshot struct { } type ResourceLakehouseMonitorTimeSeries struct { - Granularities []string `json:"granularities,omitempty"` - TimestampCol string `json:"timestamp_col,omitempty"` + Granularities []string `json:"granularities"` + TimestampCol string `json:"timestamp_col"` } type ResourceLakehouseMonitor struct { diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 3cad9ac41..20c25c1e2 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -117,6 +117,11 @@ type ResourcePipelineCluster struct { InitScripts []ResourcePipelineClusterInitScripts `json:"init_scripts,omitempty"` } +type ResourcePipelineDeployment struct { 
+ Kind string `json:"kind,omitempty"` + MetadataFilePath string `json:"metadata_file_path,omitempty"` +} + type ResourcePipelineFilters struct { Exclude []string `json:"exclude,omitempty"` Include []string `json:"include,omitempty"` @@ -165,6 +170,7 @@ type ResourcePipeline struct { Target string `json:"target,omitempty"` Url string `json:"url,omitempty"` Cluster []ResourcePipelineCluster `json:"cluster,omitempty"` + Deployment *ResourcePipelineDeployment `json:"deployment,omitempty"` Filters *ResourcePipelineFilters `json:"filters,omitempty"` Library []ResourcePipelineLibrary `json:"library,omitempty"` Notification []ResourcePipelineNotification `json:"notification,omitempty"` diff --git a/bundle/internal/tf/schema/resource_recipient.go b/bundle/internal/tf/schema/resource_recipient.go index 47d6de37c..91de4df76 100644 --- a/bundle/internal/tf/schema/resource_recipient.go +++ b/bundle/internal/tf/schema/resource_recipient.go @@ -3,7 +3,11 @@ package schema type ResourceRecipientIpAccessList struct { - AllowedIpAddresses []string `json:"allowed_ip_addresses"` + AllowedIpAddresses []string `json:"allowed_ip_addresses,omitempty"` +} + +type ResourceRecipientPropertiesKvpairs struct { + Properties map[string]string `json:"properties"` } type ResourceRecipientTokens struct { @@ -17,13 +21,23 @@ type ResourceRecipientTokens struct { } type ResourceRecipient struct { - AuthenticationType string `json:"authentication_type"` - Comment string `json:"comment,omitempty"` - DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` - Id string `json:"id,omitempty"` - Name string `json:"name"` - Owner string `json:"owner,omitempty"` - SharingCode string `json:"sharing_code,omitempty"` - IpAccessList *ResourceRecipientIpAccessList `json:"ip_access_list,omitempty"` - Tokens []ResourceRecipientTokens `json:"tokens,omitempty"` + Activated bool `json:"activated,omitempty"` + ActivationUrl string `json:"activation_url,omitempty"` + AuthenticationType string `json:"authentication_type"` + Cloud string `json:"cloud,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Owner string `json:"owner,omitempty"` + Region string `json:"region,omitempty"` + SharingCode string `json:"sharing_code,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + IpAccessList *ResourceRecipientIpAccessList `json:"ip_access_list,omitempty"` + PropertiesKvpairs *ResourceRecipientPropertiesKvpairs `json:"properties_kvpairs,omitempty"` + Tokens []ResourceRecipientTokens `json:"tokens,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_schema.go b/bundle/internal/tf/schema/resource_schema.go index f1949b07f..3ac8d813d 100644 --- a/bundle/internal/tf/schema/resource_schema.go +++ b/bundle/internal/tf/schema/resource_schema.go @@ -3,13 +3,14 @@ package schema type ResourceSchema struct { - CatalogName string `json:"catalog_name"` - Comment string `json:"comment,omitempty"` - ForceDestroy bool `json:"force_destroy,omitempty"` - Id string `json:"id,omitempty"` - MetastoreId string `json:"metastore_id,omitempty"` - Name string `json:"name"` - Owner string `json:"owner,omitempty"` - Properties map[string]string `json:"properties,omitempty"` - 
StorageRoot string `json:"storage_root,omitempty"` + CatalogName string `json:"catalog_name"` + Comment string `json:"comment,omitempty"` + EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Owner string `json:"owner,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` } diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 0bfab73fb..be6852bc0 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.39.0" +const ProviderVersion = "1.40.0" func NewRoot() *Root { return &Root{ From 3c14204e980f64f449682b4ce3975f2da8d4ef2e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 19 Apr 2024 02:22:11 +0530 Subject: [PATCH 146/286] Followup improvements to the Docker setup script (#1369) ## Changes This PR: 1. Uses bash to run the setup.sh script instead of the native busybox sh shipped with alpine. 2. Verifies the checksums of the installed terraform CLI binaries. ## Tests Manually. The docker image successfully builds. --------- Co-authored-by: Pieter Noordhuis --- Dockerfile | 1 + bundle/deploy/terraform/pkg.go | 32 +++++++++++++++--- bundle/deploy/terraform/pkg_test.go | 51 +++++++++++++++++++++++++++++ docker/setup.sh | 17 +++++++++- 4 files changed, 95 insertions(+), 6 deletions(-) create mode 100644 bundle/deploy/terraform/pkg_test.go diff --git a/Dockerfile b/Dockerfile index d4e7614c8..b2a61a767 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,7 @@ FROM alpine:3.19 as builder RUN ["apk", "add", "jq"] +RUN ["apk", "add", "bash"] WORKDIR /build diff --git a/bundle/deploy/terraform/pkg.go b/bundle/deploy/terraform/pkg.go index 911583f29..cad754024 100644 --- a/bundle/deploy/terraform/pkg.go +++ b/bundle/deploy/terraform/pkg.go @@ -15,18 +15,40 @@ const TerraformVersionEnv = "DATABRICKS_TF_VERSION" const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" +// Terraform CLI version to use and the corresponding checksums for it. The +// checksums are used to verify the integrity of the downloaded binary. Please +// update the checksums when the Terraform version is updated. The checksums +// were obtained from https://releases.hashicorp.com/terraform/1.5.5. +// +// These hashes are not used inside the CLI. They are only co-located here to be +// output in the "databricks bundle debug terraform" output. Downstream applications +// like the CLI docker image use these checksums to verify the integrity of the +// downloaded Terraform archive. 
var TerraformVersion = version.Must(version.NewVersion("1.5.5")) +const checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2" +const checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a" + +type Checksum struct { + LinuxArm64 string `json:"linux_arm64"` + LinuxAmd64 string `json:"linux_amd64"` +} + type TerraformMetadata struct { - Version string `json:"version"` - ProviderHost string `json:"providerHost"` - ProviderSource string `json:"providerSource"` - ProviderVersion string `json:"providerVersion"` + Version string `json:"version"` + Checksum Checksum `json:"checksum"` + ProviderHost string `json:"providerHost"` + ProviderSource string `json:"providerSource"` + ProviderVersion string `json:"providerVersion"` } func NewTerraformMetadata() *TerraformMetadata { return &TerraformMetadata{ - Version: TerraformVersion.String(), + Version: TerraformVersion.String(), + Checksum: Checksum{ + LinuxArm64: checksumLinuxArm64, + LinuxAmd64: checksumLinuxAmd64, + }, ProviderHost: schema.ProviderHost, ProviderSource: schema.ProviderSource, ProviderVersion: schema.ProviderVersion, diff --git a/bundle/deploy/terraform/pkg_test.go b/bundle/deploy/terraform/pkg_test.go new file mode 100644 index 000000000..b8dcb9e08 --- /dev/null +++ b/bundle/deploy/terraform/pkg_test.go @@ -0,0 +1,51 @@ +package terraform + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func downloadAndChecksum(t *testing.T, url string, expectedChecksum string) { + resp, err := http.Get(url) + require.NoError(t, err) + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Fatalf("failed to download %s: %s", url, resp.Status) + } + + tmpDir := t.TempDir() + tmpFile, err := os.Create(filepath.Join(tmpDir, "archive.zip")) + require.NoError(t, err) + defer tmpFile.Close() + + _, err = io.Copy(tmpFile, resp.Body) + require.NoError(t, err) + + _, err = tmpFile.Seek(0, 0) // go back to the start of the file + require.NoError(t, err) + + hash := sha256.New() + _, err = io.Copy(hash, tmpFile) + require.NoError(t, err) + + checksum := hex.EncodeToString(hash.Sum(nil)) + assert.Equal(t, expectedChecksum, checksum) +} + +func TestTerraformArchiveChecksums(t *testing.T) { + armUrl := fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_linux_arm64.zip", TerraformVersion, TerraformVersion) + amdUrl := fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_linux_amd64.zip", TerraformVersion, TerraformVersion) + + downloadAndChecksum(t, amdUrl, checksumLinuxAmd64) + downloadAndChecksum(t, armUrl, checksumLinuxArm64) +} diff --git a/docker/setup.sh b/docker/setup.sh index 3f6c09dc7..0dc06ce1e 100755 --- a/docker/setup.sh +++ b/docker/setup.sh @@ -1,12 +1,27 @@ -#!/bin/sh +#!/bin/bash set -euo pipefail DATABRICKS_TF_VERSION=$(/app/databricks bundle debug terraform --output json | jq -r .terraform.version) DATABRICKS_TF_PROVIDER_VERSION=$(/app/databricks bundle debug terraform --output json | jq -r .terraform.providerVersion) +if [ $ARCH != "amd64" ] && [ $ARCH != "arm64" ]; then + echo "Unsupported architecture: $ARCH" + exit 1 +fi + # Download the terraform binary mkdir -p zip wget https://releases.hashicorp.com/terraform/${DATABRICKS_TF_VERSION}/terraform_${DATABRICKS_TF_VERSION}_linux_${ARCH}.zip -O zip/terraform.zip + +# Verify the checksum. 
This is to ensure that the downloaded archive is not tampered with. +EXPECTED_CHECKSUM="$(/app/databricks bundle debug terraform --output json | jq -r .terraform.checksum.linux_$ARCH)" +COMPUTED_CHECKSUM=$(sha256sum zip/terraform.zip | awk '{ print $1 }') +if [ "$COMPUTED_CHECKSUM" != "$EXPECTED_CHECKSUM" ]; then + echo "Checksum mismatch for Terraform binary. Version: $DATABRICKS_TF_VERSION, Arch: $ARCH, Expected checksum: $EXPECTED_CHECKSUM, Computed checksum: $COMPUTED_CHECKSUM." + exit 1 +fi + +# Unzip the terraform binary. It's safe to do so because we have already verified the checksum. unzip zip/terraform.zip -d zip/terraform mkdir -p /app/bin mv zip/terraform/terraform /app/bin/terraform From 6e59b134520c42932e9db998dd6614c7b9b8158d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 19 Apr 2024 13:31:54 +0200 Subject: [PATCH 147/286] Update Spark version in integration tests to 13.3 (#1375) ## Tests Integration test run pending. --- internal/bundle/basic_test.go | 2 +- internal/bundle/helpers.go | 2 ++ internal/bundle/job_metadata_test.go | 2 +- internal/bundle/local_state_staleness_test.go | 2 +- internal/bundle/python_wheel_test.go | 3 ++- 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/internal/bundle/basic_test.go b/internal/bundle/basic_test.go index 6eb3d10fb..c24ef0c05 100644 --- a/internal/bundle/basic_test.go +++ b/internal/bundle/basic_test.go @@ -20,7 +20,7 @@ func TestAccBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { root, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, - "spark_version": "13.2.x-snapshot-scala2.12", + "spark_version": defaultSparkVersion, }) require.NoError(t, err) diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index c73d6ad03..560a0474b 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -18,6 +18,8 @@ import ( "github.com/stretchr/testify/require" ) +const defaultSparkVersion = "13.3.x-snapshot-scala2.12" + func initTestTemplate(t *testing.T, ctx context.Context, templateName string, config map[string]any) (string, error) { templateRoot := filepath.Join("bundles", templateName) diff --git a/internal/bundle/job_metadata_test.go b/internal/bundle/job_metadata_test.go index cb3ad0818..21f1086ae 100644 --- a/internal/bundle/job_metadata_test.go +++ b/internal/bundle/job_metadata_test.go @@ -28,7 +28,7 @@ func TestAccJobsMetadataFile(t *testing.T) { bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, - "spark_version": "13.2.x-snapshot-scala2.12", + "spark_version": defaultSparkVersion, }) require.NoError(t, err) diff --git a/internal/bundle/local_state_staleness_test.go b/internal/bundle/local_state_staleness_test.go index 872ac8a8e..d11234667 100644 --- a/internal/bundle/local_state_staleness_test.go +++ b/internal/bundle/local_state_staleness_test.go @@ -31,7 +31,7 @@ func TestAccLocalStateStaleness(t *testing.T) { root, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, - "spark_version": "13.2.x-snapshot-scala2.12", + "spark_version": defaultSparkVersion, }) require.NoError(t, err) diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index fc14fd17b..1299194b2 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -43,7 +43,8 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bo } 
func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { - runPythonWheelTest(t, "13.2.x-snapshot-scala2.12", false) + // This is the first DBR version where we can install Python wheels from the Workspace File System. + runPythonWheelTest(t, "13.3.x-snapshot-scala2.12", false) } func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { From 331313ea5f5d7c387183e5735185a13cc0cc0f1e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:13:50 +0530 Subject: [PATCH 148/286] Print host in `bundle validate` when passed via profile or environment variables (#1378) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes Fixes to get host from the workspace client rather than only printing the host when it's configured in the bundle config. ## Tests Manually. When a profile was specified for auth. Before: ``` ➜ bundle-playground git:(master) ✗ cli bundle validate Name: bundle-playground Target: default Workspace: Host: User: shreyas.goenka@databricks.com Path: /Users/shreyas.goenka@databricks.com/.bundle/bundle-playground/default ``` After: ``` ➜ bundle-playground git:(master) ✗ cli bundle validate Name: bundle-playground Target: default Workspace: Host: https://e2-dogfood.staging.cloud.databricks.com User: shreyas.goenka@databricks.com Path: /Users/shreyas.goenka@databricks.com/.bundle/bundle-playground/default ``` --- cmd/bundle/validate.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 0472df479..8d49ec961 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -48,7 +48,7 @@ const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} const summaryTemplate = `Name: {{ .Config.Bundle.Name | bold }} Target: {{ .Config.Bundle.Target | bold }} Workspace: - Host: {{ .Config.Workspace.Host | bold }} + Host: {{ .WorkspaceClient.Config.Host | bold }} User: {{ .Config.Workspace.CurrentUser.UserName | bold }} Path: {{ .Config.Workspace.RootPath | bold }} @@ -107,8 +107,9 @@ func renderTextOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnosti // Print validation summary. t := template.Must(template.New("summary").Funcs(validateFuncMap).Parse(summaryTemplate)) err := t.Execute(cmd.OutOrStdout(), map[string]any{ - "Config": b.Config, - "Trailer": buildTrailer(diags), + "Config": b.Config, + "Trailer": buildTrailer(diags), + "WorkspaceClient": b.WorkspaceClient(), }) if err != nil { return err From f6c4d6d15217001659aeab2eaa92110e966f1982 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:14:05 +0530 Subject: [PATCH 149/286] Add NOTICE for using Terraform 1.5.5 licensed under MPL 2.0 (#1377) We package the terraform 1.5.5 binary in our docker container images for the CLI. This thus needs to be included in our notice for the repository. --- NOTICE | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/NOTICE b/NOTICE index e356d028e..d8306510e 100644 --- a/NOTICE +++ b/NOTICE @@ -36,6 +36,10 @@ hashicorp/terraform-json - https://github.com/hashicorp/terraform-json Copyright 2019 HashiCorp, Inc. License - https://github.com/hashicorp/terraform-json/blob/main/LICENSE +hashicorp/terraform - https://github.com/hashicorp/terraform +Copyright 2014 HashiCorp, Inc. 
+License - https://github.com/hashicorp/terraform/blob/v1.5.5/LICENSE + --- This software contains code from the following open source projects, licensed under the BSD (2-clause) license: From e008c2bd8cf0e3394b680c069c32316341e3cd4f Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 19 Apr 2024 17:18:04 +0530 Subject: [PATCH 150/286] Cleanup remote file path on bundle destroy (#1374) ## Changes The sync struct initialization would recreate the deleted `file_path`. This PR moves to not initializing the sync object to delete the snapshot, thus fixing the lingering `file_path` after `bundle destroy`. ## Tests Manually, and a integration test to prevent regression. --- bundle/deploy/files/delete.go | 25 +++++++++--- internal/bundle/destroy_test.go | 70 +++++++++++++++++++++++++++++++++ libs/sync/snapshot.go | 8 ---- libs/sync/sync.go | 4 -- 4 files changed, 89 insertions(+), 18 deletions(-) create mode 100644 internal/bundle/destroy_test.go diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go index 46c554463..066368a6b 100644 --- a/bundle/deploy/files/delete.go +++ b/bundle/deploy/files/delete.go @@ -3,10 +3,12 @@ package files import ( "context" "fmt" + "os" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/sync" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/fatih/color" ) @@ -46,20 +48,31 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { } // Clean up sync snapshot file - sync, err := GetSync(ctx, bundle.ReadOnly(b)) - if err != nil { - return diag.FromErr(err) - } - err = sync.DestroySnapshot(ctx) + err = deleteSnapshotFile(ctx, b) if err != nil { return diag.FromErr(err) } - cmdio.LogString(ctx, fmt.Sprintf("Deleted snapshot file at %s", sync.SnapshotPath())) cmdio.LogString(ctx, "Successfully deleted files!") return nil } +func deleteSnapshotFile(ctx context.Context, b *bundle.Bundle) error { + opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b)) + if err != nil { + return fmt.Errorf("cannot get sync options: %w", err) + } + sp, err := sync.SnapshotPath(opts) + if err != nil { + return err + } + err = os.Remove(sp) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to destroy sync snapshot file: %s", err) + } + return nil +} + func Delete() bundle.Mutator { return &delete{} } diff --git a/internal/bundle/destroy_test.go b/internal/bundle/destroy_test.go new file mode 100644 index 000000000..43c05fbae --- /dev/null +++ b/internal/bundle/destroy_test.go @@ -0,0 +1,70 @@ +package bundle + +import ( + "errors" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal/acc" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccBundleDestroy(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + "unique_id": uniqueId, + }) + require.NoError(t, err) + + snapshotsDir := filepath.Join(bundleRoot, ".databricks", "bundle", "default", "sync-snapshots") + + // Assert the snapshot file does not exist + _, err = os.ReadDir(snapshotsDir) + assert.ErrorIs(t, err, os.ErrNotExist) + + // deploy pipeline + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // Assert 
the snapshot file exists + entries, err := os.ReadDir(snapshotsDir) + assert.NoError(t, err) + assert.Len(t, entries, 1) + + // Assert bundle deployment path is created + remoteRoot := getBundleRemoteRootPath(w, t, uniqueId) + _, err = w.Workspace.GetStatusByPath(ctx, remoteRoot) + assert.NoError(t, err) + + // assert pipeline is created + pipelineName := "test-bundle-pipeline-" + uniqueId + pipeline, err := w.Pipelines.GetByName(ctx, pipelineName) + require.NoError(t, err) + assert.Equal(t, pipeline.Name, pipelineName) + + // destroy bundle + err = destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // assert pipeline is deleted + _, err = w.Pipelines.GetByName(ctx, pipelineName) + assert.ErrorContains(t, err, "does not exist") + + // Assert snapshot file is deleted + entries, err = os.ReadDir(snapshotsDir) + require.NoError(t, err) + assert.Len(t, entries, 0) + + // Assert bundle deployment path is deleted + _, err = w.Workspace.GetStatusByPath(ctx, remoteRoot) + apiErr := &apierr.APIError{} + assert.True(t, errors.As(err, &apiErr)) + assert.Equal(t, "RESOURCE_DOES_NOT_EXIST", apiErr.ErrorCode) +} diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index 06b4d13bc..a27a8c84f 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -138,14 +138,6 @@ func (s *Snapshot) Save(ctx context.Context) error { return nil } -func (s *Snapshot) Destroy(ctx context.Context) error { - err := os.Remove(s.SnapshotPath) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to destroy sync snapshot file: %s", err) - } - return nil -} - func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { snapshot, err := newSnapshot(ctx, opts) if err != nil { diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 78faa0c8f..30b68ccf3 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -216,10 +216,6 @@ func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { return all.Iter(), nil } -func (s *Sync) DestroySnapshot(ctx context.Context) error { - return s.snapshot.Destroy(ctx) -} - func (s *Sync) SnapshotPath() string { return s.snapshot.SnapshotPath } From 6ca57a7e68675a3be27cc3832a5a96e1a6e57073 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 19 Apr 2024 19:39:33 +0530 Subject: [PATCH 151/286] Add docs URL for `run_as` in error message (#1381) --- bundle/config/mutator/run_as.go | 4 +--- bundle/tests/run_as_test.go | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 578591eb1..8da233c27 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -35,10 +35,8 @@ type errUnsupportedResourceTypeForRunAs struct { runAsUser string } -// TODO(6 March 2024): Link the docs page describing run_as semantics in the error below -// once the page is ready. func (e errUnsupportedResourceTypeForRunAs) Error() string { - return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser) + return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. 
Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser) } type errBothSpAndUserSpecified struct { diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 3b9deafe0..40359c17d 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -113,7 +113,7 @@ func TestRunAsErrorForPipelines(t *testing.T) { err := diags.Error() configPath := filepath.FromSlash("run_as/not_allowed/pipelines/databricks.yml") - assert.EqualError(t, err, fmt.Sprintf("pipelines are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) + assert.EqualError(t, err, fmt.Sprintf("pipelines are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) } func TestRunAsNoErrorForPipelines(t *testing.T) { @@ -152,7 +152,7 @@ func TestRunAsErrorForModelServing(t *testing.T) { err := diags.Error() configPath := filepath.FromSlash("run_as/not_allowed/model_serving/databricks.yml") - assert.EqualError(t, err, fmt.Sprintf("model_serving_endpoints are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) + assert.EqualError(t, err, fmt.Sprintf("model_serving_endpoints are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) } func TestRunAsNoErrorForModelServingEndpoints(t *testing.T) { From ebbb7161645fb32781172a14dcf2b835f4f402c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 14:10:18 +0000 Subject: [PATCH 152/286] Bump golang.org/x/net from 0.22.0 to 0.23.0 (#1380) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.22.0 to 0.23.0.
Commits
  • c48da13 http2: fix TestServerContinuationFlood flakes
  • 762b58d http2: fix tipos in comment
  • ba87210 http2: close connections when receiving too many headers
  • ebc8168 all: fix some typos
  • 3678185 http2: make TestCanonicalHeaderCacheGrowth faster
  • 448c44f http2: remove clientTester
  • c7877ac http2: convert the remaining clientTester tests to testClientConn
  • d8870b0 http2: use synthetic time in TestIdleConnTimeout
  • d73acff http2: only set up deadline when Server.IdleTimeout is positive
  • 89f602b http2: validate client/outgoing trailers
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/net&package-manager=go_modules&previous-version=0.22.0&new-version=0.23.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/databricks/cli/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4ba3076b0..6a991b0ec 100644 --- a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.21.0 // indirect - golang.org/x/net v0.22.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.169.0 // indirect diff --git a/go.sum b/go.sum index 07137405f..8fe9109b5 100644 --- a/go.sum +++ b/go.sum @@ -188,8 +188,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= From b296f90767a9b1131e04efc90ac9204e440db1d2 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 19 Apr 2024 16:12:52 +0200 Subject: [PATCH 153/286] Add trailing newline in usage string (#1382) ## Changes The default template includes a final newline but this was missing from the cmdgroup template. This change also adds test coverage for inherited flags and the flag group description. --- libs/cmdgroup/command_test.go | 17 +++++++++++++++-- libs/cmdgroup/template.go | 3 ++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/libs/cmdgroup/command_test.go b/libs/cmdgroup/command_test.go index 9122c7809..f3e3fe6ab 100644 --- a/libs/cmdgroup/command_test.go +++ b/libs/cmdgroup/command_test.go @@ -17,8 +17,16 @@ func TestCommandFlagGrouping(t *testing.T) { }, } + parent := &cobra.Command{ + Use: "parent", + } + + parent.PersistentFlags().String("global", "", "Global flag") + parent.AddCommand(cmd) + wrappedCmd := NewCommandWithGroupFlag(cmd) jobGroup := NewFlagGroup("Job") + jobGroup.SetDescription("Description.") fs := jobGroup.FlagSet() fs.String("job-name", "", "Name of the job") fs.String("job-type", "", "Type of the job") @@ -37,9 +45,10 @@ func TestCommandFlagGrouping(t *testing.T) { cmd.Usage() expected := `Usage: - test [flags] + parent test [flags] Job Flags: + Description. 
--job-name string Name of the job --job-type string Type of the job @@ -48,7 +57,11 @@ Pipeline Flags: --pipeline-type string Type of the pipeline Flags: - -b, --bool Bool flag` + -b, --bool Bool flag + +Global Flags: + --global string Global flag +` require.Equal(t, expected, buf.String()) require.NotNil(t, cmd.Flags().Lookup("job-name")) diff --git a/libs/cmdgroup/template.go b/libs/cmdgroup/template.go index 5c1be48fb..d2062c558 100644 --- a/libs/cmdgroup/template.go +++ b/libs/cmdgroup/template.go @@ -11,4 +11,5 @@ const usageTemplate = `Usage:{{if .Command.Runnable}} {{.NonGroupedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .Command.HasAvailableInheritedFlags}} Global Flags: -{{.Command.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}` +{{.Command.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}} +` From cd675ded9ac504e9652a7e9a90ce83493adaad98 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 19 Apr 2024 17:05:36 +0200 Subject: [PATCH 154/286] Update `testutil` helpers to return path (#1383) ## Changes I spotted a few call sites where the path of a test file was synthesized multiple times. It is easier to capture the path as a variable and reuse it. --- bundle/deploy/state_pull_test.go | 5 ++--- bundle/deploy/terraform/init_test.go | 14 +++++++------- internal/testutil/helpers.go | 26 -------------------------- internal/testutil/touch.go | 26 ++++++++++++++++++++++++++ libs/fileset/file_test.go | 24 ++++++++++++++---------- 5 files changed, 49 insertions(+), 46 deletions(-) delete mode 100644 internal/testutil/helpers.go create mode 100644 internal/testutil/touch.go diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index ca4834731..bcb88374f 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -6,7 +6,6 @@ import ( "encoding/json" "io" "os" - "path/filepath" "testing" "github.com/databricks/cli/bundle" @@ -77,11 +76,11 @@ func testStatePull(t *testing.T, opts statePullOpts) { ctx := context.Background() for _, file := range opts.localFiles { - testutil.Touch(t, filepath.Join(b.RootPath, "bar"), file) + testutil.Touch(t, b.RootPath, "bar", file) } for _, file := range opts.localNotebooks { - testutil.TouchNotebook(t, filepath.Join(b.RootPath, "bar"), file) + testutil.TouchNotebook(t, b.RootPath, "bar", file) } if opts.withExistingSnapshot { diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index ffc215851..421e9be3f 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -399,7 +399,7 @@ func TestGetEnvVarWithMatchingVersion(t *testing.T) { versionVarName := "FOO_VERSION" tmp := t.TempDir() - testutil.Touch(t, tmp, "bar") + file := testutil.Touch(t, tmp, "bar") var tc = []struct { envValue string @@ -408,19 +408,19 @@ func TestGetEnvVarWithMatchingVersion(t *testing.T) { expected string }{ { - envValue: filepath.Join(tmp, "bar"), + envValue: file, versionValue: "1.2.3", currentVersion: "1.2.3", - expected: filepath.Join(tmp, "bar"), + expected: file, }, { - envValue: filepath.Join(tmp, "does-not-exist"), + envValue: "does-not-exist", versionValue: "1.2.3", currentVersion: "1.2.3", expected: "", }, { - envValue: filepath.Join(tmp, "bar"), + envValue: file, versionValue: "1.2.3", currentVersion: "1.2.4", expected: "", @@ -432,10 +432,10 @@ func TestGetEnvVarWithMatchingVersion(t *testing.T) { expected: "", }, { - envValue: filepath.Join(tmp, "bar"), + envValue: file, versionValue: "", currentVersion: 
"1.2.3", - expected: filepath.Join(tmp, "bar"), + expected: file, }, } diff --git a/internal/testutil/helpers.go b/internal/testutil/helpers.go deleted file mode 100644 index 853cc16cc..000000000 --- a/internal/testutil/helpers.go +++ /dev/null @@ -1,26 +0,0 @@ -package testutil - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" -) - -func TouchNotebook(t *testing.T, path, file string) { - os.MkdirAll(path, 0755) - f, err := os.Create(filepath.Join(path, file)) - require.NoError(t, err) - - err = os.WriteFile(filepath.Join(path, file), []byte("# Databricks notebook source"), 0644) - require.NoError(t, err) - f.Close() -} - -func Touch(t *testing.T, path, file string) { - os.MkdirAll(path, 0755) - f, err := os.Create(filepath.Join(path, file)) - require.NoError(t, err) - f.Close() -} diff --git a/internal/testutil/touch.go b/internal/testutil/touch.go new file mode 100644 index 000000000..55683f3ed --- /dev/null +++ b/internal/testutil/touch.go @@ -0,0 +1,26 @@ +package testutil + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TouchNotebook(t *testing.T, elems ...string) string { + path := filepath.Join(elems...) + os.MkdirAll(filepath.Dir(path), 0755) + err := os.WriteFile(path, []byte("# Databricks notebook source"), 0644) + require.NoError(t, err) + return path +} + +func Touch(t *testing.T, elems ...string) string { + path := filepath.Join(elems...) + os.MkdirAll(filepath.Dir(path), 0755) + f, err := os.Create(path) + require.NoError(t, err) + f.Close() + return path +} diff --git a/libs/fileset/file_test.go b/libs/fileset/file_test.go index 4adcb1c56..cdfc9ba17 100644 --- a/libs/fileset/file_test.go +++ b/libs/fileset/file_test.go @@ -24,16 +24,20 @@ func TestSourceFileIsNotNotebook(t *testing.T) { func TestUnknownFileDetectsNotebook(t *testing.T) { tmpDir := t.TempDir() - testutil.Touch(t, tmpDir, "test.py") - testutil.TouchNotebook(t, tmpDir, "notebook.py") - f := NewFile(nil, filepath.Join(tmpDir, "test.py"), "test.py") - isNotebook, err := f.IsNotebook() - require.NoError(t, err) - require.False(t, isNotebook) + t.Run("file", func(t *testing.T) { + path := testutil.Touch(t, tmpDir, "test.py") + f := NewFile(nil, path, filepath.Base(path)) + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.False(t, isNotebook) + }) - f = NewFile(nil, filepath.Join(tmpDir, "notebook.py"), "notebook.py") - isNotebook, err = f.IsNotebook() - require.NoError(t, err) - require.True(t, isNotebook) + t.Run("notebook", func(t *testing.T) { + path := testutil.TouchNotebook(t, tmpDir, "notebook.py") + f := NewFile(nil, path, filepath.Base(path)) + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.True(t, isNotebook) + }) } From 000a7fef8c902a01d0ae577d0140070f1ba39f3c Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 22 Apr 2024 12:36:39 +0200 Subject: [PATCH 155/286] Enable job queueing by default (#1385) ## Changes This enable queueing for jobs by default, following the behavior from API 2.2+. Queing is a best practice and will be the default in API 2.2. Since we're still using API 2.1 which has queueing disabled by default, this PR enables queuing using a mutator. Customers can manually turn off queueing for any job by adding the following to their job spec: ``` queue: enabled: false ``` ## Tests Unit tests, manual confirmation of property after deployment. 
--------- Co-authored-by: Pieter Noordhuis --- bundle/config/mutator/default_queueing.go | 38 ++++++++ .../config/mutator/default_queueing_test.go | 95 +++++++++++++++++++ bundle/phases/initialize.go | 1 + 3 files changed, 134 insertions(+) create mode 100644 bundle/config/mutator/default_queueing.go create mode 100644 bundle/config/mutator/default_queueing_test.go diff --git a/bundle/config/mutator/default_queueing.go b/bundle/config/mutator/default_queueing.go new file mode 100644 index 000000000..ead77c7a8 --- /dev/null +++ b/bundle/config/mutator/default_queueing.go @@ -0,0 +1,38 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +type defaultQueueing struct{} + +func DefaultQueueing() bundle.Mutator { + return &defaultQueueing{} +} + +func (m *defaultQueueing) Name() string { + return "DefaultQueueing" +} + +// Enable queueing for jobs by default, following the behavior from API 2.2+. +// As of 2024-04, we're still using API 2.1 which has queueing disabled by default. +// This mutator makes sure queueing is enabled by default before we can adopt API 2.2. +func (m *defaultQueueing) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + r := b.Config.Resources + for i := range r.Jobs { + if r.Jobs[i].JobSettings == nil { + r.Jobs[i].JobSettings = &jobs.JobSettings{} + } + if r.Jobs[i].Queue != nil { + continue + } + r.Jobs[i].Queue = &jobs.QueueSettings{ + Enabled: true, + } + } + return nil +} diff --git a/bundle/config/mutator/default_queueing_test.go b/bundle/config/mutator/default_queueing_test.go new file mode 100644 index 000000000..ea60daf7f --- /dev/null +++ b/bundle/config/mutator/default_queueing_test.go @@ -0,0 +1,95 @@ +package mutator + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestDefaultQueueing(t *testing.T) { + m := DefaultQueueing() + assert.IsType(t, &defaultQueueing{}, m) +} + +func TestDefaultQueueingName(t *testing.T) { + m := DefaultQueueing() + assert.Equal(t, "DefaultQueueing", m.Name()) +} + +func TestDefaultQueueingApplyNoJobs(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{}, + }, + } + d := bundle.Apply(context.Background(), b, DefaultQueueing()) + assert.Len(t, d, 0) + assert.Len(t, b.Config.Resources.Jobs, 0) +} + +func TestDefaultQueueingApplyJobsAlreadyEnabled(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Queue: &jobs.QueueSettings{Enabled: true}, + }, + }, + }, + }, + }, + } + d := bundle.Apply(context.Background(), b, DefaultQueueing()) + assert.Len(t, d, 0) + assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled) +} + +func TestDefaultQueueingApplyEnableQueueing(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": {}, + }, + }, + }, + } + d := bundle.Apply(context.Background(), b, DefaultQueueing()) + assert.Len(t, d, 0) + assert.NotNil(t, b.Config.Resources.Jobs["job"].Queue) + assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled) +} + +func TestDefaultQueueingApplyWithMultipleJobs(t *testing.T) { + b := 
&bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Queue: &jobs.QueueSettings{Enabled: false}, + }, + }, + "job2": {}, + "job3": { + JobSettings: &jobs.JobSettings{ + Queue: &jobs.QueueSettings{Enabled: true}, + }, + }, + }, + }, + }, + } + d := bundle.Apply(context.Background(), b, DefaultQueueing()) + assert.Len(t, d, 0) + assert.False(t, b.Config.Resources.Jobs["job1"].Queue.Enabled) + assert.True(t, b.Config.Resources.Jobs["job2"].Queue.Enabled) + assert.True(t, b.Config.Resources.Jobs["job3"].Queue.Enabled) +} diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index d6a1b95da..2f5eab302 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -38,6 +38,7 @@ func Initialize() bundle.Mutator { mutator.SetRunAs(), mutator.OverrideCompute(), mutator.ProcessTargetMode(), + mutator.DefaultQueueing(), mutator.ExpandPipelineGlobPaths(), mutator.TranslatePaths(), python.WrapperWarning(), From 1872aa12b3086224900e759a7dbed2fdefee8c8c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 22 Apr 2024 13:44:34 +0200 Subject: [PATCH 156/286] Added support for job environments (#1379) ## Changes The main changes are: 1. Don't link artifacts to libraries anymore and instead just iterate over all jobs and tasks when uploading artifacts and update local path to remote 2. Iterating over `jobs.environments` to check if there are any local libraries and checking that they exist locally 3. Added tests to check environments are handled correctly End-to-end test will follow up ## Tests Added regression test, existing tests (including integration one) pass --- bundle/artifacts/artifacts.go | 63 +++++--- bundle/artifacts/artifacts_test.go | 91 +++++++++++ bundle/artifacts/whl/from_libraries.go | 51 ++++-- bundle/config/mutator/translate_paths.go | 7 + bundle/config/mutator/translate_paths_jobs.go | 47 +++++- bundle/config/mutator/translate_paths_test.go | 43 +++++ bundle/deploy/terraform/tfdyn/convert_job.go | 1 + bundle/libraries/libraries.go | 136 ++++++---------- bundle/libraries/libraries_test.go | 88 ----------- bundle/libraries/local_path.go | 19 +++ bundle/libraries/local_path_test.go | 29 ++++ bundle/libraries/match.go | 59 +++++-- bundle/libraries/match_test.go | 147 ++++++++++++++++++ bundle/phases/deploy.go | 2 +- bundle/python/transform.go | 4 +- bundle/tests/enviroment_key_test.go | 12 ++ .../python_wheel/environment_key/.gitignore | 3 + .../environment_key/databricks.yml | 26 ++++ .../environment_key/my_test_code/setup.py | 15 ++ .../my_test_code/src/__init__.py | 2 + .../my_test_code/src/__main__.py | 16 ++ bundle/tests/python_wheel_test.go | 25 ++- internal/bundle/artifacts_test.go | 64 ++++++++ .../databricks_template_schema.json | 13 ++ .../template/databricks.yml.tmpl | 25 +++ .../template/setup.py.tmpl | 15 ++ .../template/{{.project_name}}/__init__.py | 2 + .../template/{{.project_name}}/__main__.py | 16 ++ internal/bundle/environments_test.go | 39 +++++ internal/bundle/python_wheel_test.go | 1 - 30 files changed, 820 insertions(+), 241 deletions(-) create mode 100644 bundle/artifacts/artifacts_test.go delete mode 100644 bundle/libraries/libraries_test.go create mode 100644 bundle/tests/enviroment_key_test.go create mode 100644 bundle/tests/python_wheel/environment_key/.gitignore create mode 100644 bundle/tests/python_wheel/environment_key/databricks.yml create mode 100644 bundle/tests/python_wheel/environment_key/my_test_code/setup.py create mode 
100644 bundle/tests/python_wheel/environment_key/my_test_code/src/__init__.py create mode 100644 bundle/tests/python_wheel/environment_key/my_test_code/src/__main__.py create mode 100644 internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json create mode 100644 internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl create mode 100644 internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py create mode 100644 internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py create mode 100644 internal/bundle/environments_test.go diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index b7a22d09d..101b598dd 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -12,7 +12,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" @@ -117,8 +116,6 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost } func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error { - filesToLibraries := libraries.MapFilesToTaskLibraries(ctx, b) - for i := range a.Files { f := &a.Files[i] @@ -133,24 +130,32 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u log.Infof(ctx, "Upload succeeded") f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) - // Lookup all tasks that reference this file. - libs, ok := filesToLibraries[f.Source] - if !ok { - log.Debugf(ctx, "No tasks reference %s", f.Source) - continue - } + // TODO: confirm if we still need to update the remote path to start with /Workspace + wsfsBase := "/Workspace" + remotePath := path.Join(wsfsBase, f.RemotePath) - // Update all tasks that reference this file. 
- for _, lib := range libs { - wsfsBase := "/Workspace" - remotePath := path.Join(wsfsBase, f.RemotePath) - if lib.Whl != "" { - lib.Whl = remotePath - continue + for _, job := range b.Config.Resources.Jobs { + for i := range job.Tasks { + task := &job.Tasks[i] + for j := range task.Libraries { + lib := &task.Libraries[j] + if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { + lib.Whl = remotePath + } + if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { + lib.Jar = remotePath + } + } } - if lib.Jar != "" { - lib.Jar = remotePath - continue + + for i := range job.Environments { + env := &job.Environments[i] + for j := range env.Spec.Dependencies { + lib := env.Spec.Dependencies[j] + if isArtifactMatchLibrary(f, lib, b) { + env.Spec.Dependencies[j] = remotePath + } + } } } } @@ -158,6 +163,26 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u return nil } +func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool { + if !filepath.IsAbs(libPath) { + libPath = filepath.Join(b.RootPath, libPath) + } + + // libPath can be a glob pattern, so do the match first + matches, err := filepath.Glob(libPath) + if err != nil { + return false + } + + for _, m := range matches { + if m == f.Source { + return true + } + } + + return false +} + // Function to upload artifact file to Workspace func uploadArtifactFile(ctx context.Context, file string, client filer.Filer) error { raw, err := os.ReadFile(file) diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go new file mode 100644 index 000000000..ca0e578bd --- /dev/null +++ b/bundle/artifacts/artifacts_test.go @@ -0,0 +1,91 @@ +package artifacts + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestArtifactUpload(t *testing.T) { + tmpDir := t.TempDir() + whlFolder := filepath.Join(tmpDir, "whl") + testutil.Touch(t, whlFolder, "source.whl") + whlLocalPath := filepath.Join(whlFolder, "source.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/foo/bar/artifacts", + }, + Artifacts: config.Artifacts{ + "whl": { + Type: config.ArtifactPythonWheel, + Files: []config.ArtifactFile{ + {Source: whlLocalPath}, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + filepath.Join("whl", "source.whl"), + "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + artifact := b.Config.Artifacts["whl"] + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source.whl"), + mock.AnythingOfType("*bytes.Reader"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + 
).Return(nil) + + err := uploadArtifact(context.Background(), b, artifact, "/foo/bar/artifacts", mockFiler) + require.NoError(t, err) + + // Test that libraries path is updated + require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) + require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) +} diff --git a/bundle/artifacts/whl/from_libraries.go b/bundle/artifacts/whl/from_libraries.go index 84ef712ac..ad321557c 100644 --- a/bundle/artifacts/whl/from_libraries.go +++ b/bundle/artifacts/whl/from_libraries.go @@ -30,24 +30,18 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost tasks := libraries.FindAllWheelTasksWithLocalLibraries(b) for _, task := range tasks { for _, lib := range task.Libraries { - matches, err := filepath.Glob(filepath.Join(b.RootPath, lib.Whl)) - // File referenced from libraries section does not exists, skipping - if err != nil { - continue - } + matchAndAdd(ctx, lib.Whl, b) + } + } - for _, match := range matches { - name := filepath.Base(match) - if b.Config.Artifacts == nil { - b.Config.Artifacts = make(map[string]*config.Artifact) - } - - log.Debugf(ctx, "Adding an artifact block for %s", match) - b.Config.Artifacts[name] = &config.Artifact{ - Files: []config.ArtifactFile{ - {Source: match}, - }, - Type: config.ArtifactPythonWheel, + envs := libraries.FindAllEnvironments(b) + for _, jobEnvs := range envs { + for _, env := range jobEnvs { + if env.Spec != nil { + for _, dep := range env.Spec.Dependencies { + if libraries.IsEnvironmentDependencyLocal(dep) { + matchAndAdd(ctx, dep, b) + } } } } @@ -55,3 +49,26 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost return nil } + +func matchAndAdd(ctx context.Context, lib string, b *bundle.Bundle) { + matches, err := filepath.Glob(filepath.Join(b.RootPath, lib)) + // File referenced from libraries section does not exists, skipping + if err != nil { + return + } + + for _, match := range matches { + name := filepath.Base(match) + if b.Config.Artifacts == nil { + b.Config.Artifacts = make(map[string]*config.Artifact) + } + + log.Debugf(ctx, "Adding an artifact block for %s", match) + b.Config.Artifacts[name] = &config.Artifact{ + Files: []config.ArtifactFile{ + {Source: match}, + }, + Type: config.ArtifactPythonWheel, + } + } +} diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 8fab3abb3..018fd79c6 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -152,6 +152,13 @@ func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (str return localRelPath, nil } +func translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) { + if !strings.HasPrefix(localRelPath, ".") { + localRelPath = "." 
+ string(filepath.Separator) + localRelPath + } + return localRelPath, nil +} + func (m *translatePaths) rewriteValue(b *bundle.Bundle, p dyn.Path, v dyn.Value, fn rewriteFunc, dir string) (dyn.Value, error) { out := v.MustString() err := m.rewritePath(dir, b, &out, fn) diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index e761bda09..d41660728 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -5,39 +5,51 @@ import ( "slices" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/dyn" ) -type jobTaskRewritePattern struct { - pattern dyn.Pattern - fn rewriteFunc +type jobRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc + skipRewrite func(string) bool } -func rewritePatterns(base dyn.Pattern) []jobTaskRewritePattern { - return []jobTaskRewritePattern{ +func noSkipRewrite(string) bool { + return false +} + +func rewritePatterns(base dyn.Pattern) []jobRewritePattern { + return []jobRewritePattern{ { base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")), translateNotebookPath, + noSkipRewrite, }, { base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")), translateFilePath, + noSkipRewrite, }, { base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")), translateDirectoryPath, + noSkipRewrite, }, { base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")), translateFilePath, + noSkipRewrite, }, { base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")), translateNoOp, + noSkipRewrite, }, { base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")), translateNoOp, + noSkipRewrite, }, } } @@ -73,9 +85,28 @@ func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dy ) // Compile list of patterns and their respective rewrite functions. + jobEnvironmentsPatterns := []jobRewritePattern{ + { + dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("jobs"), + dyn.AnyKey(), + dyn.Key("environments"), + dyn.AnyIndex(), + dyn.Key("spec"), + dyn.Key("dependencies"), + dyn.AnyIndex(), + ), + translateNoOpWithPrefix, + func(s string) bool { + return !libraries.IsEnvironmentDependencyLocal(s) + }, + }, + } taskPatterns := rewritePatterns(base) forEachPatterns := rewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task"))) - allPatterns := append(taskPatterns, forEachPatterns...) + allPatterns := append(taskPatterns, jobEnvironmentsPatterns...) + allPatterns = append(allPatterns, forEachPatterns...) 
for _, t := range allPatterns { v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { @@ -91,6 +122,10 @@ func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dy return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err) } + sv := v.MustString() + if t.skipRewrite(sv) { + return v, nil + } return m.rewriteRelativeTo(b, p, v, t.fn, dir, fallback[key]) }) if err != nil { diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 9650ae8ba..29afb9972 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -4,6 +4,7 @@ import ( "context" "os" "path/filepath" + "strings" "testing" "github.com/databricks/cli/bundle" @@ -651,3 +652,45 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`) } + +func TestTranslatePathJobEnvironments(t *testing.T) { + dir := t.TempDir() + touchEmptyFile(t, filepath.Join(dir, "env1.py")) + touchEmptyFile(t, filepath.Join(dir, "env2.py")) + + b := &bundle.Bundle{ + RootPath: dir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + "./dist/env1.whl", + "../dist/env2.whl", + "simplejson", + "/Workspace/Users/foo@bar.com/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) + + assert.Equal(t, strings.Join([]string{".", "job", "dist", "env1.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) + assert.Equal(t, strings.Join([]string{".", "dist", "env2.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + assert.Equal(t, "simplejson", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[2]) + assert.Equal(t, "/Workspace/Users/foo@bar.com/test.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[3]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go index 65ac8b9bd..d1e7e73e2 100644 --- a/bundle/deploy/terraform/tfdyn/convert_job.go +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -24,6 +24,7 @@ func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { "tasks": "task", "job_clusters": "job_cluster", "parameters": "parameter", + "environments": "environment", }) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 8dd63a75a..a79adedbf 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -1,45 +1,71 @@ package libraries import ( - "context" - "fmt" - "path/filepath" - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/log" - "github.com/databricks/databricks-sdk-go/service/compute" 
"github.com/databricks/databricks-sdk-go/service/jobs" ) -func findAllTasks(b *bundle.Bundle) []*jobs.Task { +func findAllTasks(b *bundle.Bundle) map[string]([]jobs.Task) { r := b.Config.Resources - result := make([]*jobs.Task, 0) + result := make(map[string]([]jobs.Task), 0) for k := range b.Config.Resources.Jobs { - tasks := r.Jobs[k].JobSettings.Tasks - for i := range tasks { - task := &tasks[i] - result = append(result, task) - } + result[k] = append(result[k], r.Jobs[k].JobSettings.Tasks...) } return result } +func FindAllEnvironments(b *bundle.Bundle) map[string]([]jobs.JobEnvironment) { + jobEnvs := make(map[string]([]jobs.JobEnvironment), 0) + for jobKey, job := range b.Config.Resources.Jobs { + if len(job.Environments) == 0 { + continue + } + + jobEnvs[jobKey] = job.Environments + } + + return jobEnvs +} + +func isEnvsWithLocalLibraries(envs []jobs.JobEnvironment) bool { + for _, e := range envs { + for _, l := range e.Spec.Dependencies { + if IsEnvironmentDependencyLocal(l) { + return true + } + } + } + + return false +} + func FindAllWheelTasksWithLocalLibraries(b *bundle.Bundle) []*jobs.Task { tasks := findAllTasks(b) + envs := FindAllEnvironments(b) + wheelTasks := make([]*jobs.Task, 0) - for _, task := range tasks { - if task.PythonWheelTask != nil && IsTaskWithLocalLibraries(task) { - wheelTasks = append(wheelTasks, task) + for k, jobTasks := range tasks { + for i := range jobTasks { + task := &jobTasks[i] + if task.PythonWheelTask == nil { + continue + } + + if isTaskWithLocalLibraries(*task) { + wheelTasks = append(wheelTasks, task) + } + + if envs[k] != nil && isEnvsWithLocalLibraries(envs[k]) { + wheelTasks = append(wheelTasks, task) + } } } return wheelTasks } -func IsTaskWithLocalLibraries(task *jobs.Task) bool { +func isTaskWithLocalLibraries(task jobs.Task) bool { for _, l := range task.Libraries { if IsLocalLibrary(&l) { return true @@ -49,7 +75,7 @@ func IsTaskWithLocalLibraries(task *jobs.Task) bool { return false } -func IsTaskWithWorkspaceLibraries(task *jobs.Task) bool { +func IsTaskWithWorkspaceLibraries(task jobs.Task) bool { for _, l := range task.Libraries { if IsWorkspaceLibrary(&l) { return true @@ -58,73 +84,3 @@ func IsTaskWithWorkspaceLibraries(task *jobs.Task) bool { return false } - -func findLibraryMatches(lib *compute.Library, b *bundle.Bundle) ([]string, error) { - path := libraryPath(lib) - if path == "" { - return nil, nil - } - - fullPath := filepath.Join(b.RootPath, path) - return filepath.Glob(fullPath) -} - -func findArtifactFiles(ctx context.Context, lib *compute.Library, b *bundle.Bundle) ([]*config.ArtifactFile, error) { - matches, err := findLibraryMatches(lib, b) - if err != nil { - return nil, err - } - - if len(matches) == 0 && IsLocalLibrary(lib) { - return nil, fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libraryPath(lib)) - } - - var out []*config.ArtifactFile - for _, match := range matches { - af, err := findArtifactFileByLocalPath(match, b) - if err != nil { - cmdio.LogString(ctx, fmt.Sprintf("%s. Skipping uploading. 
In order to use the define 'artifacts' section", err.Error())) - } else { - out = append(out, af) - } - } - - return out, nil -} - -func findArtifactFileByLocalPath(path string, b *bundle.Bundle) (*config.ArtifactFile, error) { - for _, a := range b.Config.Artifacts { - for k := range a.Files { - if a.Files[k].Source == path { - return &a.Files[k], nil - } - } - } - - return nil, fmt.Errorf("artifact section is not defined for file at %s", path) -} - -func MapFilesToTaskLibraries(ctx context.Context, b *bundle.Bundle) map[string][]*compute.Library { - tasks := findAllTasks(b) - out := make(map[string][]*compute.Library) - for _, task := range tasks { - for j := range task.Libraries { - lib := &task.Libraries[j] - if !IsLocalLibrary(lib) { - continue - } - - matches, err := findLibraryMatches(lib, b) - if err != nil { - log.Warnf(ctx, "Error matching library to files: %s", err.Error()) - continue - } - - for _, match := range matches { - out[match] = append(out[match], lib) - } - } - } - - return out -} diff --git a/bundle/libraries/libraries_test.go b/bundle/libraries/libraries_test.go deleted file mode 100644 index 3da10d47b..000000000 --- a/bundle/libraries/libraries_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package libraries - -import ( - "context" - "path/filepath" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/resources" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/stretchr/testify/assert" -) - -func TestMapFilesToTaskLibrariesNoGlob(t *testing.T) { - b := &bundle.Bundle{ - RootPath: "testdata", - Config: config.Root{ - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "job1": { - JobSettings: &jobs.JobSettings{ - Tasks: []jobs.Task{ - { - Libraries: []compute.Library{ - { - Whl: "library1", - }, - { - Whl: "library2", - }, - { - Whl: "/absolute/path/in/workspace/library3", - }, - }, - }, - { - Libraries: []compute.Library{ - { - Whl: "library1", - }, - { - Whl: "library2", - }, - }, - }, - }, - }, - }, - "job2": { - JobSettings: &jobs.JobSettings{ - Tasks: []jobs.Task{ - { - Libraries: []compute.Library{ - { - Whl: "library1", - }, - { - Whl: "library2", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - out := MapFilesToTaskLibraries(context.Background(), b) - assert.Len(t, out, 2) - - // Pointer equality for "library1" - assert.Equal(t, []*compute.Library{ - &b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0], - &b.Config.Resources.Jobs["job1"].JobSettings.Tasks[1].Libraries[0], - &b.Config.Resources.Jobs["job2"].JobSettings.Tasks[0].Libraries[0], - }, out[filepath.Clean("testdata/library1")]) - - // Pointer equality for "library2" - assert.Equal(t, []*compute.Library{ - &b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[1], - &b.Config.Resources.Jobs["job1"].JobSettings.Tasks[1].Libraries[1], - &b.Config.Resources.Jobs["job2"].JobSettings.Tasks[0].Libraries[1], - }, out[filepath.Clean("testdata/library2")]) -} diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go index a5c0cc969..f1e3788f2 100644 --- a/bundle/libraries/local_path.go +++ b/bundle/libraries/local_path.go @@ -38,6 +38,25 @@ func IsLocalPath(p string) bool { return !path.IsAbs(p) } +// IsEnvironmentDependencyLocal returns true if the specified dependency +// should be interpreted as a local path. +// We use this to check if the dependency in environment spec is local. 
+// We can't use IsLocalPath beacuse environment dependencies can be +// a pypi package name which can be misinterpreted as a local path by IsLocalPath. +func IsEnvironmentDependencyLocal(dep string) bool { + possiblePrefixes := []string{ + ".", + } + + for _, prefix := range possiblePrefixes { + if strings.HasPrefix(dep, prefix) { + return true + } + } + + return false +} + func isRemoteStorageScheme(path string) bool { url, err := url.Parse(path) if err != nil { diff --git a/bundle/libraries/local_path_test.go b/bundle/libraries/local_path_test.go index 640afa85b..d2492d6b1 100644 --- a/bundle/libraries/local_path_test.go +++ b/bundle/libraries/local_path_test.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/compute" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIsLocalPath(t *testing.T) { @@ -41,3 +42,31 @@ func TestIsLocalLibrary(t *testing.T) { // Empty. assert.False(t, IsLocalLibrary(&compute.Library{})) } + +func TestIsEnvironmentDependencyLocal(t *testing.T) { + testCases := [](struct { + path string + expected bool + }){ + {path: "./local/*.whl", expected: true}, + {path: ".\\local\\*.whl", expected: true}, + {path: "./local/mypath.whl", expected: true}, + {path: ".\\local\\mypath.whl", expected: true}, + {path: "../local/*.whl", expected: true}, + {path: "..\\local\\*.whl", expected: true}, + {path: "./../local/*.whl", expected: true}, + {path: ".\\..\\local\\*.whl", expected: true}, + {path: "../../local/*.whl", expected: true}, + {path: "..\\..\\local\\*.whl", expected: true}, + {path: "pypipackage", expected: false}, + {path: "pypipackage/test.whl", expected: false}, + {path: "pypipackage/*.whl", expected: false}, + {path: "/Volumes/catalog/schema/volume/path.whl", expected: false}, + {path: "/Workspace/my_project/dist.whl", expected: false}, + {path: "-r /Workspace/my_project/requirements.txt", expected: false}, + } + + for _, tc := range testCases { + require.Equal(t, IsEnvironmentDependencyLocal(tc.path), tc.expected) + } +} diff --git a/bundle/libraries/match.go b/bundle/libraries/match.go index d051e163c..096cdf4a5 100644 --- a/bundle/libraries/match.go +++ b/bundle/libraries/match.go @@ -2,44 +2,77 @@ package libraries import ( "context" + "fmt" + "path/filepath" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" ) type match struct { } -func MatchWithArtifacts() bundle.Mutator { +func ValidateLocalLibrariesExist() bundle.Mutator { return &match{} } func (a *match) Name() string { - return "libraries.MatchWithArtifacts" + return "libraries.ValidateLocalLibrariesExist" } func (a *match) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - tasks := findAllTasks(b) - for _, task := range tasks { - if isMissingRequiredLibraries(task) { - return diag.Errorf("task '%s' is missing required libraries. 
Please include your package code in task libraries block", task.TaskKey) + for _, job := range b.Config.Resources.Jobs { + err := validateEnvironments(job.Environments, b) + if err != nil { + return diag.FromErr(err) } - for j := range task.Libraries { - lib := &task.Libraries[j] - _, err := findArtifactFiles(ctx, lib, b) + + for _, task := range job.JobSettings.Tasks { + err := validateTaskLibraries(task.Libraries, b) if err != nil { return diag.FromErr(err) } } } + return nil } -func isMissingRequiredLibraries(task *jobs.Task) bool { - if task.Libraries != nil { - return false +func validateTaskLibraries(libs []compute.Library, b *bundle.Bundle) error { + for _, lib := range libs { + path := libraryPath(&lib) + if path == "" || !IsLocalPath(path) { + continue + } + + matches, err := filepath.Glob(filepath.Join(b.RootPath, path)) + if err != nil { + return err + } + + if len(matches) == 0 { + return fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libraryPath(&lib)) + } } - return task.PythonWheelTask != nil || task.SparkJarTask != nil + return nil +} + +func validateEnvironments(envs []jobs.JobEnvironment, b *bundle.Bundle) error { + for _, env := range envs { + for _, dep := range env.Spec.Dependencies { + matches, err := filepath.Glob(filepath.Join(b.RootPath, dep)) + if err != nil { + return err + } + + if len(matches) == 0 && IsEnvironmentDependencyLocal(dep) { + return fmt.Errorf("file %s is referenced in environments section but doesn't exist on the local file system", dep) + } + } + } + + return nil } diff --git a/bundle/libraries/match_test.go b/bundle/libraries/match_test.go index 828c65640..bb4b15107 100644 --- a/bundle/libraries/match_test.go +++ b/bundle/libraries/match_test.go @@ -1 +1,148 @@ package libraries + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestValidateEnvironments(t *testing.T) { + tmpDir := t.TempDir() + testutil.Touch(t, tmpDir, "wheel.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + "./wheel.whl", + "simplejson", + "/Workspace/Users/foo@bar.com/artifacts/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + require.Nil(t, diags) +} + +func TestValidateEnvironmentsNoFile(t *testing.T) { + tmpDir := t.TempDir() + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + "./wheel.whl", + "simplejson", + "/Workspace/Users/foo@bar.com/artifacts/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + require.Len(t, diags, 1) + require.Equal(t, "file ./wheel.whl is referenced in environments section but doesn't exist on the local file system", 
diags[0].Summary) +} + +func TestValidateTaskLibraries(t *testing.T) { + tmpDir := t.TempDir() + testutil.Touch(t, tmpDir, "wheel.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "./wheel.whl", + }, + { + Whl: "/Workspace/Users/foo@bar.com/artifacts/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + require.Nil(t, diags) +} + +func TestValidateTaskLibrariesNoFile(t *testing.T) { + tmpDir := t.TempDir() + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "./wheel.whl", + }, + { + Whl: "/Workspace/Users/foo@bar.com/artifacts/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + require.Len(t, diags, 1) + require.Equal(t, "file ./wheel.whl is referenced in libraries section but doesn't exist on the local file system", diags[0].Summary) +} diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index de94c5a0e..fce98b038 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -26,7 +26,7 @@ func Deploy() bundle.Mutator { terraform.StatePull(), deploy.StatePull(), mutator.ValidateGitDetails(), - libraries.MatchWithArtifacts(), + libraries.ValidateLocalLibrariesExist(), artifacts.CleanUp(), artifacts.UploadAll(), python.TransformWheelTask(), diff --git a/bundle/python/transform.go b/bundle/python/transform.go index 728d4e83d..457b45f78 100644 --- a/bundle/python/transform.go +++ b/bundle/python/transform.go @@ -104,7 +104,7 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey { // At this point of moment we don't have local paths in Libraries sections anymore // Local paths have been replaced with the remote when the artifacts where uploaded // in artifacts.UploadAll mutator. 
- if task.PythonWheelTask == nil || !needsTrampoline(task) { + if task.PythonWheelTask == nil || !needsTrampoline(*task) { continue } @@ -117,7 +117,7 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey { return result } -func needsTrampoline(task *jobs.Task) bool { +func needsTrampoline(task jobs.Task) bool { return libraries.IsTaskWithWorkspaceLibraries(task) } diff --git a/bundle/tests/enviroment_key_test.go b/bundle/tests/enviroment_key_test.go new file mode 100644 index 000000000..3e12ddb68 --- /dev/null +++ b/bundle/tests/enviroment_key_test.go @@ -0,0 +1,12 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEnvironmentKeySupported(t *testing.T) { + _, diags := loadTargetWithDiags("./python_wheel/environment_key", "default") + require.Empty(t, diags) +} diff --git a/bundle/tests/python_wheel/environment_key/.gitignore b/bundle/tests/python_wheel/environment_key/.gitignore new file mode 100644 index 000000000..f03e23bc2 --- /dev/null +++ b/bundle/tests/python_wheel/environment_key/.gitignore @@ -0,0 +1,3 @@ +build/ +*.egg-info +.databricks diff --git a/bundle/tests/python_wheel/environment_key/databricks.yml b/bundle/tests/python_wheel/environment_key/databricks.yml new file mode 100644 index 000000000..198f8c0d2 --- /dev/null +++ b/bundle/tests/python_wheel/environment_key/databricks.yml @@ -0,0 +1,26 @@ +bundle: + name: environment_key + +artifacts: + my_test_code: + type: whl + path: "./my_test_code" + build: "python3 setup.py bdist_wheel" + +resources: + jobs: + test_job: + name: "My Wheel Job" + tasks: + - task_key: TestTask + existing_cluster_id: "0717-132531-5opeqon1" + python_wheel_task: + package_name: "my_test_code" + entry_point: "run" + environment_key: "test_env" + environments: + - environment_key: "test_env" + spec: + client: "1" + dependencies: + - ./my_test_code/dist/*.whl diff --git a/bundle/tests/python_wheel/environment_key/my_test_code/setup.py b/bundle/tests/python_wheel/environment_key/my_test_code/setup.py new file mode 100644 index 000000000..0bd871dd3 --- /dev/null +++ b/bundle/tests/python_wheel/environment_key/my_test_code/setup.py @@ -0,0 +1,15 @@ +from setuptools import setup, find_packages + +import src + +setup( + name="my_test_code", + version=src.__version__, + author=src.__author__, + url="https://databricks.com", + author_email="john.doe@databricks.com", + description="my test wheel", + packages=find_packages(include=["src"]), + entry_points={"group_1": "run=src.__main__:main"}, + install_requires=["setuptools"], +) diff --git a/bundle/tests/python_wheel/environment_key/my_test_code/src/__init__.py b/bundle/tests/python_wheel/environment_key/my_test_code/src/__init__.py new file mode 100644 index 000000000..909f1f322 --- /dev/null +++ b/bundle/tests/python_wheel/environment_key/my_test_code/src/__init__.py @@ -0,0 +1,2 @@ +__version__ = "0.0.1" +__author__ = "Databricks" diff --git a/bundle/tests/python_wheel/environment_key/my_test_code/src/__main__.py b/bundle/tests/python_wheel/environment_key/my_test_code/src/__main__.py new file mode 100644 index 000000000..73d045afb --- /dev/null +++ b/bundle/tests/python_wheel/environment_key/my_test_code/src/__main__.py @@ -0,0 +1,16 @@ +""" +The entry point of the Python Wheel +""" + +import sys + + +def main(): + # This method will print the provided arguments + print('Hello from my func') + print('Got arguments:') + print(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/bundle/tests/python_wheel_test.go 
b/bundle/tests/python_wheel_test.go index e2266516a..8d0036a7b 100644 --- a/bundle/tests/python_wheel_test.go +++ b/bundle/tests/python_wheel_test.go @@ -23,7 +23,7 @@ func TestPythonWheelBuild(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(matches)) - match := libraries.MatchWithArtifacts() + match := libraries.ValidateLocalLibrariesExist() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -40,7 +40,7 @@ func TestPythonWheelBuildAutoDetect(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(matches)) - match := libraries.MatchWithArtifacts() + match := libraries.ValidateLocalLibrariesExist() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -53,7 +53,7 @@ func TestPythonWheelWithDBFSLib(t *testing.T) { diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) require.NoError(t, diags.Error()) - match := libraries.MatchWithArtifacts() + match := libraries.ValidateLocalLibrariesExist() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -66,7 +66,7 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) require.NoError(t, diags.Error()) - match := libraries.MatchWithArtifacts() + match := libraries.ValidateLocalLibrariesExist() diags = bundle.Apply(ctx, b, match) require.ErrorContains(t, diags.Error(), "./non-existing/*.whl") @@ -79,3 +79,20 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { "my_test_code-0.0.1-py3-none-any.whl", )) } + +func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) { + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel/environment_key") + require.NoError(t, err) + + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) + require.NoError(t, diags.Error()) + + matches, err := filepath.Glob("./python_wheel/environment_key/my_test_code/dist/my_test_code-*.whl") + require.NoError(t, err) + require.Equal(t, 1, len(matches)) + + match := libraries.ValidateLocalLibrariesExist() + diags = bundle.Apply(ctx, b, match) + require.NoError(t, diags.Error()) +} diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 866a1f6e9..222b23047 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -89,3 +89,67 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl, ) } + +func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + dir := t.TempDir() + whlPath := filepath.Join(dir, "dist", "test.whl") + touchEmptyFile(t, whlPath) + + wsDir := internal.TemporaryWorkspaceDir(t, w) + + b := &bundle.Bundle{ + RootPath: dir, + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + }, + Workspace: config.Workspace{ + ArtifactPath: wsDir, + }, + Artifacts: config.Artifacts{ + "test": &config.Artifact{ + Type: "whl", + Files: []config.ArtifactFile{ + { + Source: whlPath, + }, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test": { + JobSettings: &jobs.JobSettings{ + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + "dist/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + require.NoError(t, diags.Error()) + + // The remote path attribute on the artifact file 
should have been set. + require.Regexp(t, + regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + b.Config.Artifacts["test"].Files[0].RemotePath, + ) + + // The job environment deps path should have been updated to the remote path. + require.Regexp(t, + regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + b.Config.Resources.Jobs["test"].JobSettings.Environments[0].Spec.Dependencies[0], + ) +} diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json b/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json new file mode 100644 index 000000000..ae765c58f --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json @@ -0,0 +1,13 @@ +{ + "properties": { + "project_name": { + "type": "string", + "default": "my_test_code", + "description": "Unique name for this project" + }, + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + } + } +} diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl b/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl new file mode 100644 index 000000000..4a674dce0 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl @@ -0,0 +1,25 @@ +bundle: + name: wheel-task-with-environments + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + +resources: + jobs: + some_other_job: + name: "[${bundle.target}] Test Wheel Job With Environments {{.unique_id}}" + tasks: + - task_key: TestTask + python_wheel_task: + package_name: my_test_code + entry_point: run + parameters: + - "one" + - "two" + environment_key: "test" + environments: + - environment_key: "test" + spec: + client: "1" + dependencies: + - ./dist/*.whl diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl b/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl new file mode 100644 index 000000000..b528657b1 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl @@ -0,0 +1,15 @@ +from setuptools import setup, find_packages + +import {{.project_name}} + +setup( + name="{{.project_name}}", + version={{.project_name}}.__version__, + author={{.project_name}}.__author__, + url="https://databricks.com", + author_email="john.doe@databricks.com", + description="my example wheel", + packages=find_packages(include=["{{.project_name}}"]), + entry_points={"group1": "run={{.project_name}}.__main__:main"}, + install_requires=["setuptools"], +) diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py b/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py new file mode 100644 index 000000000..909f1f322 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py @@ -0,0 +1,2 @@ +__version__ = "0.0.1" +__author__ = "Databricks" diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py b/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py new file mode 100644 index 000000000..ea918ce2d --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py 
@@ -0,0 +1,16 @@ +""" +The entry point of the Python Wheel +""" + +import sys + + +def main(): + # This method will print the provided arguments + print("Hello from my func") + print("Got arguments:") + print(sys.argv) + + +if __name__ == "__main__": + main() diff --git a/internal/bundle/environments_test.go b/internal/bundle/environments_test.go new file mode 100644 index 000000000..5cffe8857 --- /dev/null +++ b/internal/bundle/environments_test.go @@ -0,0 +1,39 @@ +package bundle + +import ( + "testing" + + "github.com/databricks/cli/internal/acc" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { + t.Skip("Skipping test until serveless is enabled") + + ctx, _ := acc.WorkspaceTest(t) + + bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{ + "unique_id": uuid.New().String(), + }) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + t.Cleanup(func() { + destroyBundle(t, ctx, bundleRoot) + }) + + out, err := runResource(t, ctx, bundleRoot, "some_other_job") + require.NoError(t, err) + require.Contains(t, out, "Hello from my func") + require.Contains(t, out, "Got arguments:") + require.Contains(t, out, "['my_test_code', 'one', 'two']") + + out, err = runResourceWithParams(t, ctx, bundleRoot, "some_other_job", "--python-params=param1,param2") + require.NoError(t, err) + require.Contains(t, out, "Hello from my func") + require.Contains(t, out, "Got arguments:") + require.Contains(t, out, "['my_test_code', 'param1', 'param2']") +} diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index 1299194b2..bf2462920 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -43,7 +43,6 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bo } func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { - // This is the first DBR version where we can install Python wheels from the Workspace File System. runPythonWheelTest(t, "13.3.x-snapshot-scala2.12", false) } From 3108883a8f8e96e56f46497de9ed67c947988f13 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 22 Apr 2024 13:50:13 +0200 Subject: [PATCH 157/286] Processing and completion of positional args to bundle run (#1120) ## Changes With this change, both job parameters and task parameters can be specified as positional arguments to bundle run. How the positional arguments are interpreted depends on the configuration of the job. 
### Examples: For a job that has job parameters configured, a user can specify: ``` databricks bundle run my_job -- --param1=value1 --param2=value2 ``` And the run is kicked off with job parameters set to: ```json { "param1": "value1", "param2": "value2" } ``` Similarly, for a job that doesn't use job parameters and only has `notebook_task` tasks, a user can specify: ``` databricks bundle run my_notebook_job -- --param1=value1 --param2=value2 ``` And the run is kicked off with task level `notebook_params` configured as: ```json { "param1": "value1", "param2": "value2" } ``` For a job that doesn't use job parameters and only has either `spark_python_task` or `python_wheel_task` tasks, a user can specify: ``` databricks bundle run my_python_file_job -- --flag=value other arguments ``` And the run is kicked off with task level `python_params` configured as: ```json [ "--flag=value", "other", "arguments" ] ``` The same applies to jobs with only `spark_jar_task` or `spark_submit_task` tasks. ## Tests Unit tests. Tested the completions manually. --- bundle/run/args.go | 127 ++++++++++++++ bundle/run/args_test.go | 134 ++++++++++++++++ bundle/run/job.go | 9 ++ bundle/run/job_args.go | 184 +++++++++++++++++++++++ bundle/run/job_args_test.go | 223 ++++++++++++++++++++++++++++ bundle/run/pipeline.go | 13 +++ bundle/run/runner.go | 3 + cmd/bundle/run.go | 45 ++++++-- 8 files changed, 730 insertions(+), 8 deletions(-) create mode 100644 bundle/run/args.go create mode 100644 bundle/run/args_test.go create mode 100644 bundle/run/job_args.go create mode 100644 bundle/run/job_args_test.go diff --git a/bundle/run/args.go b/bundle/run/args.go new file mode 100644 index 000000000..2885cda01 --- /dev/null +++ b/bundle/run/args.go @@ -0,0 +1,127 @@ +package run + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// argsHandler defines the (unexported) interface for the runners in this +// package to implement to handle context-specific positional arguments. +// +// For jobs, this means: +// - If a job uses job parameters: parse positional arguments into key-value pairs +// and pass them as job parameters. +// - If a job does not use job parameters AND only has Spark Python tasks: +// pass through the positional arguments as a list of Python parameters. +// - If a job does not use job parameters AND only has notebook tasks: +// parse arguments into key-value pairs and pass them as notebook parameters. +// - ... +// +// In all cases, we may be able to provide context-aware argument completions. +type argsHandler interface { + // Parse additional positional arguments. + ParseArgs(args []string, opts *Options) error + + // Complete additional positional arguments. + CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) +} + +// nopArgsHandler is a no-op implementation of [argsHandler]. +// It returns an error if any positional arguments are present and doesn't complete anything. +type nopArgsHandler struct{} + +func (nopArgsHandler) ParseArgs(args []string, opts *Options) error { + if len(args) == 0 { + return nil + } + + return fmt.Errorf("received %d unexpected positional arguments", len(args)) +} + +func (nopArgsHandler) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +// argsToKeyValueMap parses key-value pairs from the specified arguments.
+// +// It accepts these formats: +// - `--key=value` +// - `--key`, `value` +// +// Remaining arguments are returned as-is. +func argsToKeyValueMap(args []string) (map[string]string, []string) { + kv := make(map[string]string) + key := "" + tail := args + + for i, arg := range args { + // If key is set; use the next argument as value. + if key != "" { + kv[key] = arg + key = "" + tail = args[i+1:] + continue + } + + if strings.HasPrefix(arg, "--") { + parts := strings.SplitN(arg[2:], "=", 2) + if len(parts) == 2 { + kv[parts[0]] = parts[1] + tail = args[i+1:] + continue + } + + // Use this argument as key, the next as value. + key = parts[0] + continue + } + + // If we cannot interpret it; return here. + break + } + + return kv, tail +} + +// genericParseKeyValueArgs parses key-value pairs from the specified arguments. +// If there are any positional arguments left, it returns an error. +func genericParseKeyValueArgs(args []string) (map[string]string, error) { + kv, args := argsToKeyValueMap(args) + if len(args) > 0 { + return nil, fmt.Errorf("received %d unexpected positional arguments", len(args)) + } + + return kv, nil +} + +// genericCompleteKeyValueArgs completes key-value pairs from the specified arguments. +// Completion options that are already specified are skipped. +func genericCompleteKeyValueArgs(args []string, toComplete string, options []string) ([]string, cobra.ShellCompDirective) { + // If the string to complete contains an equals sign, then we are + // completing the value part (which we don't know here). + if strings.Contains(toComplete, "=") { + return nil, cobra.ShellCompDirectiveNoFileComp + } + + // Remove already completed key/value pairs. + kv, args := argsToKeyValueMap(args) + + // If the list of remaining args is empty, return possible completions. + if len(args) == 0 { + var completions []string + for _, option := range options { + // Skip options that have already been specified. + if _, ok := kv[option]; ok { + continue + } + completions = append(completions, fmt.Sprintf("--%s=", option)) + } + // Note: we include cobra.ShellCompDirectiveNoSpace to suggest including + // the value part right after the equals sign. + return completions, cobra.ShellCompDirectiveNoFileComp | cobra.ShellCompDirectiveNoSpace + } + + return nil, cobra.ShellCompDirectiveNoFileComp +} diff --git a/bundle/run/args_test.go b/bundle/run/args_test.go new file mode 100644 index 000000000..aff14b481 --- /dev/null +++ b/bundle/run/args_test.go @@ -0,0 +1,134 @@ +package run + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNopArgsHandler(t *testing.T) { + h := nopArgsHandler{} + opts := &Options{} + + // No error if no positional arguments are passed. + err := h.ParseArgs([]string{}, opts) + assert.NoError(t, err) + + // Error if any positional arguments are passed. + err = h.ParseArgs([]string{"foo"}, opts) + assert.EqualError(t, err, "received 1 unexpected positional arguments") + + // No completions. 
+ completions, _ := h.CompleteArgs([]string{}, "") + assert.Nil(t, completions) +} + +func TestArgsToKeyValueMap(t *testing.T) { + for _, tc := range []struct { + input []string + expected map[string]string + tail []string + err error + }{ + { + input: []string{}, + expected: map[string]string{}, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz", "qux"}, + expected: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz", "qux", "tail"}, + expected: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + tail: []string{"tail"}, + }, + { + input: []string{"--foo=bar", "--baz", "qux", "tail", "--foo=bar"}, + expected: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + tail: []string{"tail", "--foo=bar"}, + }, + { + input: []string{"--foo=bar", "--baz=qux"}, + expected: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz=--qux"}, + expected: map[string]string{ + "foo": "bar", + "baz": "--qux", + }, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz="}, + expected: map[string]string{ + "foo": "bar", + "baz": "", + }, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz"}, + expected: map[string]string{ + "foo": "bar", + }, + tail: []string{"--baz"}, + }, + } { + actual, tail := argsToKeyValueMap(tc.input) + assert.Equal(t, tc.expected, actual) + assert.Equal(t, tc.tail, tail) + } +} + +func TestGenericParseKeyValueArgs(t *testing.T) { + kv, err := genericParseKeyValueArgs([]string{"--foo=bar", "--baz", "qux"}) + assert.NoError(t, err) + assert.Equal(t, map[string]string{ + "foo": "bar", + "baz": "qux", + }, kv) + + _, err = genericParseKeyValueArgs([]string{"--foo=bar", "--baz", "qux", "tail"}) + assert.EqualError(t, err, "received 1 unexpected positional arguments") +} + +func TestGenericCompleteKeyValueArgs(t *testing.T) { + var completions []string + + // Complete nothing if there are no options. + completions, _ = genericCompleteKeyValueArgs([]string{}, ``, []string{}) + assert.Empty(t, completions) + + // Complete nothing if we're in the middle of a key-value pair (as single argument with equals sign). + completions, _ = genericCompleteKeyValueArgs([]string{}, `--foo=`, []string{`foo`, `bar`}) + assert.Empty(t, completions) + + // Complete nothing if we're in the middle of a key-value pair (as two arguments). + completions, _ = genericCompleteKeyValueArgs([]string{`--foo`}, ``, []string{`foo`, `bar`}) + assert.Empty(t, completions) + + // Complete if we're at the beginning. + completions, _ = genericCompleteKeyValueArgs([]string{}, ``, []string{`foo`, `bar`}) + assert.Equal(t, []string{`--foo=`, `--bar=`}, completions) + + // Complete if we have already one key-value pair. 
+ completions, _ = genericCompleteKeyValueArgs([]string{`--foo=bar`}, ``, []string{`foo`, `bar`}) + assert.Equal(t, []string{`--bar=`}, completions) +} diff --git a/bundle/run/job.go b/bundle/run/job.go index 043ea846a..8003c7d29 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -15,6 +15,7 @@ import ( "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/fatih/color" + "github.com/spf13/cobra" "golang.org/x/sync/errgroup" ) @@ -315,3 +316,11 @@ func (r *jobRunner) Cancel(ctx context.Context) error { return errGroup.Wait() } + +func (r *jobRunner) ParseArgs(args []string, opts *Options) error { + return r.posArgsHandler().ParseArgs(args, opts) +} + +func (r *jobRunner) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return r.posArgsHandler().CompleteArgs(args, toComplete) +} diff --git a/bundle/run/job_args.go b/bundle/run/job_args.go new file mode 100644 index 000000000..85cf96efb --- /dev/null +++ b/bundle/run/job_args.go @@ -0,0 +1,184 @@ +package run + +import ( + "github.com/databricks/cli/bundle/config/resources" + "github.com/spf13/cobra" + "golang.org/x/exp/maps" +) + +type jobParameterArgs struct { + *resources.Job +} + +func (a jobParameterArgs) ParseArgs(args []string, opts *Options) error { + kv, err := genericParseKeyValueArgs(args) + if err != nil { + return err + } + + // Merge the key-value pairs from the args into the options struct. + if opts.Job.jobParams == nil { + opts.Job.jobParams = kv + } else { + for k, v := range kv { + opts.Job.jobParams[k] = v + } + } + return nil +} + +func (a jobParameterArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var completions []string + for _, param := range a.Parameters { + completions = append(completions, param.Name) + } + return genericCompleteKeyValueArgs(args, toComplete, completions) +} + +type jobTaskNotebookParamArgs struct { + *resources.Job +} + +func (a jobTaskNotebookParamArgs) ParseArgs(args []string, opts *Options) error { + kv, err := genericParseKeyValueArgs(args) + if err != nil { + return err + } + + // Merge the key-value pairs from the args into the options struct. + if opts.Job.notebookParams == nil { + opts.Job.notebookParams = kv + } else { + for k, v := range kv { + opts.Job.notebookParams[k] = v + } + } + return nil +} + +func (a jobTaskNotebookParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + parameters := make(map[string]string) + for _, t := range a.Tasks { + if nt := t.NotebookTask; nt != nil { + maps.Copy(parameters, nt.BaseParameters) + } + } + return genericCompleteKeyValueArgs(args, toComplete, maps.Keys(parameters)) +} + +type jobTaskJarParamArgs struct { + *resources.Job +} + +func (a jobTaskJarParamArgs) ParseArgs(args []string, opts *Options) error { + opts.Job.jarParams = append(opts.Job.jarParams, args...) + return nil +} + +func (a jobTaskJarParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +type jobTaskPythonParamArgs struct { + *resources.Job +} + +func (a jobTaskPythonParamArgs) ParseArgs(args []string, opts *Options) error { + opts.Job.pythonParams = append(opts.Job.pythonParams, args...) 
+ return nil +} + +func (a jobTaskPythonParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +type jobTaskSparkSubmitParamArgs struct { + *resources.Job +} + +func (a jobTaskSparkSubmitParamArgs) ParseArgs(args []string, opts *Options) error { + opts.Job.sparkSubmitParams = append(opts.Job.sparkSubmitParams, args...) + return nil +} + +func (a jobTaskSparkSubmitParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +type jobTaskType int + +const ( + jobTaskTypeNotebook jobTaskType = iota + 1 + jobTaskTypeSparkJar + jobTaskTypeSparkPython + jobTaskTypeSparkSubmit + jobTaskTypePipeline + jobTaskTypePythonWheel + jobTaskTypeSql + jobTaskTypeDbt + jobTaskTypeRunJob +) + +func (r *jobRunner) posArgsHandler() argsHandler { + job := r.job + if job == nil || job.JobSettings == nil { + return nopArgsHandler{} + } + + // Handle job parameters, if any are defined. + if len(job.Parameters) > 0 { + return &jobParameterArgs{job} + } + + // Handle task parameters otherwise. + var seen = make(map[jobTaskType]bool) + for _, t := range job.Tasks { + if t.NotebookTask != nil { + seen[jobTaskTypeNotebook] = true + } + if t.SparkJarTask != nil { + seen[jobTaskTypeSparkJar] = true + } + if t.SparkPythonTask != nil { + seen[jobTaskTypeSparkPython] = true + } + if t.SparkSubmitTask != nil { + seen[jobTaskTypeSparkSubmit] = true + } + if t.PipelineTask != nil { + seen[jobTaskTypePipeline] = true + } + if t.PythonWheelTask != nil { + seen[jobTaskTypePythonWheel] = true + } + if t.SqlTask != nil { + seen[jobTaskTypeSql] = true + } + if t.DbtTask != nil { + seen[jobTaskTypeDbt] = true + } + if t.RunJobTask != nil { + seen[jobTaskTypeRunJob] = true + } + } + + // Cannot handle positional arguments if we have more than one task type. + keys := maps.Keys(seen) + if len(keys) != 1 { + return nopArgsHandler{} + } + + switch keys[0] { + case jobTaskTypeNotebook: + return jobTaskNotebookParamArgs{job} + case jobTaskTypeSparkJar: + return jobTaskJarParamArgs{job} + case jobTaskTypeSparkPython, jobTaskTypePythonWheel: + return jobTaskPythonParamArgs{job} + case jobTaskTypeSparkSubmit: + return jobTaskSparkSubmitParamArgs{job} + default: + // No positional argument handling for other task types. 
+ return nopArgsHandler{} + } +} diff --git a/bundle/run/job_args_test.go b/bundle/run/job_args_test.go new file mode 100644 index 000000000..709994907 --- /dev/null +++ b/bundle/run/job_args_test.go @@ -0,0 +1,223 @@ +package run + +import ( + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestJobParameterArgs(t *testing.T) { + a := jobParameterArgs{ + &resources.Job{ + JobSettings: &jobs.JobSettings{ + Parameters: []jobs.JobParameterDefinition{ + { + Name: "foo", + Default: "value", + }, + { + Name: "bar", + Default: "value", + }, + }, + }, + }, + } + + t.Run("ParseArgsError", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"--p1=v1", "superfluous"}, &opts) + assert.ErrorContains(t, err, "unexpected positional arguments") + }) + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"--p1=v1", "--p2=v2"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + map[string]string{ + "p1": "v1", + "p2": "v2", + }, + opts.Job.jobParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.jobParams = map[string]string{"p1": "v1"} + err := a.ParseArgs([]string{"--p2=v2"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + map[string]string{ + "p1": "v1", + "p2": "v2", + }, + opts.Job.jobParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.Equal(t, []string{"--foo=", "--bar="}, completions) + }) +} + +func TestJobTaskNotebookParamArgs(t *testing.T) { + a := jobTaskNotebookParamArgs{ + &resources.Job{ + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + NotebookTask: &jobs.NotebookTask{ + BaseParameters: map[string]string{ + "foo": "value", + "bar": "value", + }, + }, + }, + }, + }, + }, + } + + t.Run("ParseArgsError", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"--p1=v1", "superfluous"}, &opts) + assert.ErrorContains(t, err, "unexpected positional arguments") + }) + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"--p1=v1", "--p2=v2"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + map[string]string{ + "p1": "v1", + "p2": "v2", + }, + opts.Job.notebookParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.notebookParams = map[string]string{"p1": "v1"} + err := a.ParseArgs([]string{"--p2=v2"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + map[string]string{ + "p1": "v1", + "p2": "v2", + }, + opts.Job.notebookParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.ElementsMatch(t, []string{"--foo=", "--bar="}, completions) + }) +} + +func TestJobTaskJarParamArgs(t *testing.T) { + a := jobTaskJarParamArgs{} + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"foo", "bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.jarParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.jarParams = []string{"foo"} + err := a.ParseArgs([]string{"bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.jarParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.Empty(t, completions) + }) +} 
+ +func TestJobTaskPythonParamArgs(t *testing.T) { + a := jobTaskPythonParamArgs{} + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"foo", "bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.pythonParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.pythonParams = []string{"foo"} + err := a.ParseArgs([]string{"bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.pythonParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.Empty(t, completions) + }) +} + +func TestJobTaskSparkSubmitParamArgs(t *testing.T) { + a := jobTaskSparkSubmitParamArgs{} + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"foo", "bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.sparkSubmitParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.sparkSubmitParams = []string{"foo"} + err := a.ParseArgs([]string{"bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.sparkSubmitParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.Empty(t, completions) + }) +} diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index e1f5bfe5f..4e29b9f3f 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/spf13/cobra" ) func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent { @@ -181,3 +182,15 @@ func (r *pipelineRunner) Cancel(ctx context.Context) error { _, err = wait.GetWithTimeout(jobRunTimeout) return err } + +func (r *pipelineRunner) ParseArgs(args []string, opts *Options) error { + if len(args) == 0 { + return nil + } + + return fmt.Errorf("received %d unexpected positional arguments", len(args)) +} + +func (r *pipelineRunner) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} diff --git a/bundle/run/runner.go b/bundle/run/runner.go index de2a1ae7a..0f202ce7d 100644 --- a/bundle/run/runner.go +++ b/bundle/run/runner.go @@ -29,6 +29,9 @@ type Runner interface { // Cancel the underlying workflow. Cancel(ctx context.Context) error + + // Runners support parsing and completion of additional positional arguments. + argsHandler } // Find locates a runner matching the specified argument. diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index e6a8e1ba4..63458f85c 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -18,8 +18,26 @@ import ( func newRunCommand() *cobra.Command { cmd := &cobra.Command{ Use: "run [flags] KEY", - Short: "Run a resource (e.g. a job or a pipeline)", - Args: root.MaximumNArgs(1), + Short: "Run a job or pipeline update", + Long: `Run the job or pipeline identified by KEY. + +The KEY is the unique identifier of the resource to run. 
In addition to +customizing the run using any of the available flags, you can also specify +keyword or positional arguments as shown in these examples: + + databricks bundle run my_job -- --key1 value1 --key2 value2 + +Or: + + databricks bundle run my_job -- value1 value2 value3 + +If the specified job uses job parameters or the job has a notebook task with +parameters, the first example applies and flag names are mapped to the +parameter names. + +If the specified job does not use job parameters and the job has a Python file +task or a Python wheel task, the second example applies. +`, } var runOptions run.Options @@ -62,7 +80,7 @@ func newRunCommand() *cobra.Command { args = append(args, id) } - if len(args) != 1 { + if len(args) < 1 { return fmt.Errorf("expected a KEY of the resource to run") } @@ -71,6 +89,12 @@ func newRunCommand() *cobra.Command { return err } + // Parse additional positional arguments. + err = runner.ParseArgs(args[1:], &runOptions) + if err != nil { + return err + } + runOptions.NoWait = noWait if restart { s := cmdio.Spinner(ctx) @@ -107,10 +131,6 @@ func newRunCommand() *cobra.Command { } cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) > 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - b, diags := root.MustConfigureBundle(cmd) if err := diags.Error(); err != nil { cobra.CompErrorln(err.Error()) @@ -123,7 +143,16 @@ func newRunCommand() *cobra.Command { return nil, cobra.ShellCompDirectiveNoFileComp } - return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp + if len(args) == 0 { + return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp + } else { + // If we know the resource to run, we can complete additional positional arguments. + runner, err := run.Find(b, args[0]) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + return runner.CompleteArgs(args[1:], toComplete) + } } return cmd From 1d9bf4b2c42a77f9b58b72fb0b264167433f7a4e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 22 Apr 2024 17:21:41 +0530 Subject: [PATCH 158/286] Add legacy option for `run_as` (#1384) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR partially reverts the changes in https://github.com/databricks/cli/pull/1233 and puts the old code under an "experimental.use_legacy_run_as" configuration. This gives customers who ran into the breaking change made in the PR a way out. ## Tests Both manually and via unit tests. Manually verified that run_as works for pipelines now. And if a user wants to use the feature they need to be both a Metastore and a workspace admin. --------- Error when the deploying user is a workspace admin but not a metastore admin: ``` Error: terraform apply: exit status 1 Error: cannot update permissions: User is not a metastore admin for Metastore 'deco-uc-prod-aws-us-east-1'. with databricks_permissions.pipeline_foo, on bundle.tf.json line 23, in resource.databricks_permissions.pipeline_foo: 23: } ``` -------- Output of bundle validate: ``` ➜ bundle-playground git:(master) ✗ cli bundle validate Warning: You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI. 
In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC. at experimental.use_legacy_run_as in databricks.yml:13:22 Name: bundle-playground Target: default Workspace: Host: https://dbc-a39a1eb1-ef95.cloud.databricks.com User: shreyas.goenka@databricks.com Path: /Users/shreyas.goenka@databricks.com/.bundle/bundle-playground/default Found 1 warning ``` --- bundle/config/experimental.go | 13 +++++ bundle/config/mutator/run_as.go | 70 ++++++++++++++++--- bundle/tests/run_as/legacy/databricks.yml | 68 ++++++++++++++++++++++ bundle/tests/run_as_test.go | 51 +++++++++++++++++ 4 files changed, 193 insertions(+), 9 deletions(-) create mode 100644 bundle/tests/run_as/legacy/databricks.yml diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 62d1ae731..008d7b909 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -10,6 +10,19 @@ type Experimental struct { // In this case the configured wheel task will be deployed as a notebook task which install defined wheel in runtime and executes it. // For more details see https://github.com/databricks/cli/pull/797 and https://github.com/databricks/cli/pull/635 PythonWheelWrapper bool `json:"python_wheel_wrapper,omitempty"` + + // Enable legacy run_as behavior. That is: + // - Set the run_as identity as the owner of any pipelines in the bundle. + // - Do not error in the presence of resources that do not support run_as. + // As of April 2024 this includes pipelines and model serving endpoints. + // + // This mode of run_as requires the deploying user to be a workspace and metastore + // admin. Use of this flag is not recommended for new bundles, and it is only provided + // to unblock customers that are stuck due to breaking changes in the run_as behavior + // made in https://github.com/databricks/cli/pull/1233. This flag might + // be removed in the future once we have a proper workaround like allowing IS_OWNER + // as a top-level permission in the DAB. + UseLegacyRunAs bool `json:"use_legacy_run_as,omitempty"` } type Command string diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 8da233c27..c5b294b27 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -3,8 +3,10 @@ package mutator import ( "context" "fmt" + "slices" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -101,19 +103,12 @@ func validateRunAs(b *bundle.Bundle) error { return nil } -func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { - // Mutator is a no-op if run_as is not specified in the bundle +func setRunAsForJobs(b *bundle.Bundle) { runAs := b.Config.RunAs if runAs == nil { - return nil + return } - // Assert the run_as configuration is valid in the context of the bundle - if err := validateRunAs(b); err != nil { - return diag.FromErr(err) - } - - // Set run_as for jobs for i := range b.Config.Resources.Jobs { job := b.Config.Resources.Jobs[i] if job.RunAs != nil { @@ -124,6 +119,63 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { UserName: runAs.UserName, } } +} +// Legacy behavior of run_as for DLT pipelines.
Available under the experimental.use_legacy_run_as flag. +// Only available to unblock customers stuck due to breaking changes in https://github.com/databricks/cli/pull/1233 +func setPipelineOwnersToRunAsIdentity(b *bundle.Bundle) { + runAs := b.Config.RunAs + if runAs == nil { + return + } + + me := b.Config.Workspace.CurrentUser.UserName + // If user deploying the bundle and the one defined in run_as are the same + // Do not add IS_OWNER permission. Current user is implied to be an owner in this case. + // Otherwise, it will fail due to this bug https://github.com/databricks/terraform-provider-databricks/issues/2407 + if runAs.UserName == me || runAs.ServicePrincipalName == me { + return + } + + for i := range b.Config.Resources.Pipelines { + pipeline := b.Config.Resources.Pipelines[i] + pipeline.Permissions = slices.DeleteFunc(pipeline.Permissions, func(p resources.Permission) bool { + return (runAs.ServicePrincipalName != "" && p.ServicePrincipalName == runAs.ServicePrincipalName) || + (runAs.UserName != "" && p.UserName == runAs.UserName) + }) + pipeline.Permissions = append(pipeline.Permissions, resources.Permission{ + Level: "IS_OWNER", + ServicePrincipalName: runAs.ServicePrincipalName, + UserName: runAs.UserName, + }) + } +} + +func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + // Mutator is a no-op if run_as is not specified in the bundle + runAs := b.Config.RunAs + if runAs == nil { + return nil + } + + if b.Config.Experimental != nil && b.Config.Experimental.UseLegacyRunAs { + setPipelineOwnersToRunAsIdentity(b) + setRunAsForJobs(b) + return diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: "You are using the legacy mode of run_as. The support for this mode is experimental and might be removed in a future release of the CLI.
In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC.", + Path: dyn.MustPathFromString("experimental.use_legacy_run_as"), + Location: b.Config.GetLocation("experimental.use_legacy_run_as"), + }, + } + } + + // Assert the run_as configuration is valid in the context of the bundle + if err := validateRunAs(b); err != nil { + return diag.FromErr(err) + } + + setRunAsForJobs(b) return nil } diff --git a/bundle/tests/run_as/legacy/databricks.yml b/bundle/tests/run_as/legacy/databricks.yml new file mode 100644 index 000000000..e47224dbb --- /dev/null +++ b/bundle/tests/run_as/legacy/databricks.yml @@ -0,0 +1,68 @@ +bundle: + name: "run_as" + +run_as: + service_principal_name: "my_service_principal" + +experimental: + use_legacy_run_as: true + +resources: + jobs: + job_one: + name: Job One + + tasks: + - task_key: "task_one" + notebook_task: + notebook_path: "./test.py" + + job_two: + name: Job Two + + tasks: + - task_key: "task_two" + notebook_task: + notebook_path: "./test.py" + + job_three: + name: Job Three + + run_as: + service_principal_name: "my_service_principal_for_job" + + tasks: + - task_key: "task_three" + notebook_task: + notebook_path: "./test.py" + + pipelines: + nyc_taxi_pipeline: + name: "nyc taxi loader" + + permissions: + - level: CAN_VIEW + service_principal_name: my_service_principal + - level: CAN_VIEW + user_name: my_user_name + + libraries: + - notebook: + path: ./dlt/nyc_taxi_loader + + + models: + model_one: + name: "skynet" + + registered_models: + model_two: + name: "skynet (in UC)" + + experiments: + experiment_one: + name: "experiment_one" + + model_serving_endpoints: + model_serving_one: + name: "skynet" diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 40359c17d..5ad7a89aa 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -13,6 +13,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/databricks/databricks-sdk-go/service/serving" "github.com/stretchr/testify/assert" ) @@ -233,3 +234,53 @@ func TestRunAsErrorNeitherUserOrSpSpecifiedAtTargetOverride(t *testing.T) { configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user_override/override.yml") assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:12", configPath)) } + +func TestLegacyRunAs(t *testing.T) { + b := load(t, "./run_as/legacy") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) + + assert.Len(t, b.Config.Resources.Jobs, 3) + jobs := b.Config.Resources.Jobs + + // job_one and job_two should have the same run_as identity as the bundle. 
+ assert.NotNil(t, jobs["job_one"].RunAs) + assert.Equal(t, "my_service_principal", jobs["job_one"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_one"].RunAs.UserName) + + assert.NotNil(t, jobs["job_two"].RunAs) + assert.Equal(t, "my_service_principal", jobs["job_two"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_two"].RunAs.UserName) + + // job_three should retain its run_as identity. + assert.NotNil(t, jobs["job_three"].RunAs) + assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_three"].RunAs.UserName) + + // Assert owner permissions for pipelines are set. + pipelines := b.Config.Resources.Pipelines + assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) + + assert.Equal(t, "CAN_VIEW", pipelines["nyc_taxi_pipeline"].Permissions[0].Level) + assert.Equal(t, "my_user_name", pipelines["nyc_taxi_pipeline"].Permissions[0].UserName) + + assert.Equal(t, "IS_OWNER", pipelines["nyc_taxi_pipeline"].Permissions[1].Level) + assert.Equal(t, "my_service_principal", pipelines["nyc_taxi_pipeline"].Permissions[1].ServicePrincipalName) + + // Assert other resources are not affected. + assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) + assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) + assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) + assert.Equal(t, serving.CreateServingEndpoint{Name: "skynet"}, *b.Config.Resources.ModelServingEndpoints["model_serving_one"].CreateServingEndpoint) +} From 5ee4b41cd5bb2a469dbc409d212fe3625bdc7aed Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Tue, 23 Apr 2024 09:17:14 +0200 Subject: [PATCH 159/286] Decouple winget release (#1389) ## Changes We are starting to sign Windows CLI executables, but this has to be done from a machine with a Yubikey storing the signing certificate for the immediate future. As such, we will only trigger Winget publishing once the signed binaries have been uploaded to Github. Additionally, as an extra precaution, we will only release the signed binaries via Winget.
## Tests --- .github/workflows/publish-winget.yml | 16 ++++++++++++++++ .github/workflows/release.yml | 12 ------------ 2 files changed, 16 insertions(+), 12 deletions(-) create mode 100644 .github/workflows/publish-winget.yml diff --git a/.github/workflows/publish-winget.yml b/.github/workflows/publish-winget.yml new file mode 100644 index 000000000..19603e669 --- /dev/null +++ b/.github/workflows/publish-winget.yml @@ -0,0 +1,16 @@ +name: publish-winget + +on: + workflow_dispatch: + +jobs: + publish-to-winget-pkgs: + runs-on: windows-latest + environment: release + steps: + - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2 + with: + identifier: Databricks.DatabricksCLI + installers-regex: 'windows_.*-signed\.zip$' # Only signed Windows releases + token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }} + fork-user: eng-dev-ecosystem-bot diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f9b4ec15f..e09b500fb 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -130,15 +130,3 @@ jobs: version: "${{ env.VERSION }}", } }); - - publish-to-winget-pkgs: - needs: goreleaser - runs-on: windows-latest - environment: release - steps: - - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2 - with: - identifier: Databricks.DatabricksCLI - installers-regex: 'windows_.*\.zip$' # Only windows releases - token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }} - fork-user: eng-dev-ecosystem-bot From 27d35d5e1cbbed07f46d1d3cafc5621f79b6eedf Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 23 Apr 2024 12:54:40 +0200 Subject: [PATCH 160/286] Release v0.218.0 (#1391) This release marks the general availability of Databricks Asset Bundles. CLI: * Publish Docker images ([#1353](https://github.com/databricks/cli/pull/1353)). * Add support for multi-arch Docker images ([#1362](https://github.com/databricks/cli/pull/1362)). * Do not prefill https:// in prompt for Databricks Host ([#1364](https://github.com/databricks/cli/pull/1364)). * Add better documentation for the `auth login` command ([#1366](https://github.com/databricks/cli/pull/1366)). * Add URLs for authentication documentation to the auth command help ([#1365](https://github.com/databricks/cli/pull/1365)). Bundles: * Fix compute override for foreach tasks ([#1357](https://github.com/databricks/cli/pull/1357)). * Transform artifact files source patterns in build not upload stage ([#1359](https://github.com/databricks/cli/pull/1359)). * Convert between integer and float in normalization ([#1371](https://github.com/databricks/cli/pull/1371)). * Disable locking for development mode ([#1302](https://github.com/databricks/cli/pull/1302)). * Resolve variable references inside variable lookup fields ([#1368](https://github.com/databricks/cli/pull/1368)). * Added validate mutator to surface additional bundle warnings ([#1352](https://github.com/databricks/cli/pull/1352)). * Upgrade terraform-provider-databricks to 1.40.0 ([#1376](https://github.com/databricks/cli/pull/1376)). * Print host in `bundle validate` when passed via profile or environment variables ([#1378](https://github.com/databricks/cli/pull/1378)). * Cleanup remote file path on bundle destroy ([#1374](https://github.com/databricks/cli/pull/1374)). * Add docs URL for `run_as` in error message ([#1381](https://github.com/databricks/cli/pull/1381)). 
* Enable job queueing by default ([#1385](https://github.com/databricks/cli/pull/1385)). * Added support for job environments ([#1379](https://github.com/databricks/cli/pull/1379)). * Processing and completion of positional args to bundle run ([#1120](https://github.com/databricks/cli/pull/1120)). * Add legacy option for `run_as` ([#1384](https://github.com/databricks/cli/pull/1384)). API Changes: * Changed `databricks lakehouse-monitors cancel-refresh` command with new required argument order. * Changed `databricks lakehouse-monitors create` command with new required argument order. * Changed `databricks lakehouse-monitors delete` command with new required argument order. * Changed `databricks lakehouse-monitors get` command with new required argument order. * Changed `databricks lakehouse-monitors get-refresh` command with new required argument order. * Changed `databricks lakehouse-monitors list-refreshes` command with new required argument order. * Changed `databricks lakehouse-monitors run-refresh` command with new required argument order. * Changed `databricks lakehouse-monitors update` command with new required argument order. * Changed `databricks account workspace-assignment update` command to return response. OpenAPI commit 94684175b8bd65f8701f89729351f8069e8309c9 (2024-04-11) Dependency updates: * Bump github.com/databricks/databricks-sdk-go from 0.37.0 to 0.38.0 ([#1361](https://github.com/databricks/cli/pull/1361)). * Bump golang.org/x/net from 0.22.0 to 0.23.0 ([#1380](https://github.com/databricks/cli/pull/1380)). --- CHANGELOG.md | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e29984771..8b74498ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,49 @@ # Version changelog +## 0.218.0 + +This release marks the general availability of Databricks Asset Bundles. + +CLI: + * Publish Docker images ([#1353](https://github.com/databricks/cli/pull/1353)). + * Add support for multi-arch Docker images ([#1362](https://github.com/databricks/cli/pull/1362)). + * Do not prefill https:// in prompt for Databricks Host ([#1364](https://github.com/databricks/cli/pull/1364)). + * Add better documentation for the `auth login` command ([#1366](https://github.com/databricks/cli/pull/1366)). + * Add URLs for authentication documentation to the auth command help ([#1365](https://github.com/databricks/cli/pull/1365)). + +Bundles: + * Fix compute override for foreach tasks ([#1357](https://github.com/databricks/cli/pull/1357)). + * Transform artifact files source patterns in build not upload stage ([#1359](https://github.com/databricks/cli/pull/1359)). + * Convert between integer and float in normalization ([#1371](https://github.com/databricks/cli/pull/1371)). + * Disable locking for development mode ([#1302](https://github.com/databricks/cli/pull/1302)). + * Resolve variable references inside variable lookup fields ([#1368](https://github.com/databricks/cli/pull/1368)). + * Added validate mutator to surface additional bundle warnings ([#1352](https://github.com/databricks/cli/pull/1352)). + * Upgrade terraform-provider-databricks to 1.40.0 ([#1376](https://github.com/databricks/cli/pull/1376)). + * Print host in `bundle validate` when passed via profile or environment variables ([#1378](https://github.com/databricks/cli/pull/1378)). + * Cleanup remote file path on bundle destroy ([#1374](https://github.com/databricks/cli/pull/1374)). 
+ * Add docs URL for `run_as` in error message ([#1381](https://github.com/databricks/cli/pull/1381)). + * Enable job queueing by default ([#1385](https://github.com/databricks/cli/pull/1385)). + * Added support for job environments ([#1379](https://github.com/databricks/cli/pull/1379)). + * Processing and completion of positional args to bundle run ([#1120](https://github.com/databricks/cli/pull/1120)). + * Add legacy option for `run_as` ([#1384](https://github.com/databricks/cli/pull/1384)). + +API Changes: + * Changed `databricks lakehouse-monitors cancel-refresh` command with new required argument order. + * Changed `databricks lakehouse-monitors create` command with new required argument order. + * Changed `databricks lakehouse-monitors delete` command with new required argument order. + * Changed `databricks lakehouse-monitors get` command with new required argument order. + * Changed `databricks lakehouse-monitors get-refresh` command with new required argument order. + * Changed `databricks lakehouse-monitors list-refreshes` command with new required argument order. + * Changed `databricks lakehouse-monitors run-refresh` command with new required argument order. + * Changed `databricks lakehouse-monitors update` command with new required argument order. + * Changed `databricks account workspace-assignment update` command to return response. + +OpenAPI commit 94684175b8bd65f8701f89729351f8069e8309c9 (2024-04-11) + +Dependency updates: + * Bump github.com/databricks/databricks-sdk-go from 0.37.0 to 0.38.0 ([#1361](https://github.com/databricks/cli/pull/1361)). + * Bump golang.org/x/net from 0.22.0 to 0.23.0 ([#1380](https://github.com/databricks/cli/pull/1380)). + ## 0.217.1 CLI: From 5120e943020cda1aaa9e9696c1f08bafb211538b Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 23 Apr 2024 18:55:25 +0530 Subject: [PATCH 161/286] Removed autogenerated docs for the CLI commands (#1392) This documentation is very outdated. We don't need this because the CLI is self-documenting. Also users who land on this page (because of SEO for example) can be confused because of the lack of documentation for new commands. 
Example issue: https://github.com/databricks/cli/issues/1331 --- docs/commands.md | 4216 ---------------------------------------------- 1 file changed, 4216 deletions(-) delete mode 100755 docs/commands.md diff --git a/docs/commands.md b/docs/commands.md deleted file mode 100755 index 701e8efbe..000000000 --- a/docs/commands.md +++ /dev/null @@ -1,4216 +0,0 @@ -# Available `databricks` commands - -- [databricks alerts - The alerts API can be used to perform CRUD operations on alerts.](#databricks-alerts---the-alerts-api-can-be-used-to-perform-crud-operations-on-alerts) - - [databricks alerts create - Create an alert.](#databricks-alerts-create---create-an-alert) - - [databricks alerts delete - Delete an alert.](#databricks-alerts-delete---delete-an-alert) - - [databricks alerts get - Get an alert.](#databricks-alerts-get---get-an-alert) - - [databricks alerts list - Get alerts.](#databricks-alerts-list---get-alerts) - - [databricks alerts update - Update an alert.](#databricks-alerts-update---update-an-alert) -- [databricks catalogs - A catalog is the first layer of Unity Catalog’s three-level namespace.](#databricks-catalogs---a-catalog-is-the-first-layer-of-unity-catalogs-three-level-namespace) - - [databricks catalogs create - Create a catalog.](#databricks-catalogs-create---create-a-catalog) - - [databricks catalogs delete - Delete a catalog.](#databricks-catalogs-delete---delete-a-catalog) - - [databricks catalogs get - Get a catalog.](#databricks-catalogs-get---get-a-catalog) - - [databricks catalogs list - List catalogs.](#databricks-catalogs-list---list-catalogs) - - [databricks catalogs update - Update a catalog.](#databricks-catalogs-update---update-a-catalog) -- [databricks cluster-policies - Cluster policy limits the ability to configure clusters based on a set of rules.](#databricks-cluster-policies---cluster-policy-limits-the-ability-to-configure-clusters-based-on-a-set-of-rules) - - [databricks cluster-policies create - Create a new policy.](#databricks-cluster-policies-create---create-a-new-policy) - - [databricks cluster-policies delete - Delete a cluster policy.](#databricks-cluster-policies-delete---delete-a-cluster-policy) - - [databricks cluster-policies edit - Update a cluster policy.](#databricks-cluster-policies-edit---update-a-cluster-policy) - - [databricks cluster-policies get - Get entity.](#databricks-cluster-policies-get---get-entity) - - [databricks cluster-policies list - Get a cluster policy.](#databricks-cluster-policies-list---get-a-cluster-policy) -- [databricks clusters - The Clusters API allows you to create, start, edit, list, terminate, and delete clusters.](#databricks-clusters---the-clusters-api-allows-you-to-create-start-edit-list-terminate-and-delete-clusters) - - [databricks clusters change-owner - Change cluster owner.](#databricks-clusters-change-owner---change-cluster-owner) - - [databricks clusters create - Create new cluster.](#databricks-clusters-create---create-new-cluster) - - [databricks clusters delete - Terminate cluster.](#databricks-clusters-delete---terminate-cluster) - - [databricks clusters edit - Update cluster configuration.](#databricks-clusters-edit---update-cluster-configuration) - - [databricks clusters events - List cluster activity events.](#databricks-clusters-events---list-cluster-activity-events) - - [databricks clusters get - Get cluster info.](#databricks-clusters-get---get-cluster-info) - - [databricks clusters list - List all clusters.](#databricks-clusters-list---list-all-clusters) - - [databricks clusters 
list-node-types - List node types.](#databricks-clusters-list-node-types---list-node-types) - - [databricks clusters list-zones - List availability zones.](#databricks-clusters-list-zones---list-availability-zones) - - [databricks clusters permanent-delete - Permanently delete cluster.](#databricks-clusters-permanent-delete---permanently-delete-cluster) - - [databricks clusters pin - Pin cluster.](#databricks-clusters-pin---pin-cluster) - - [databricks clusters resize - Resize cluster.](#databricks-clusters-resize---resize-cluster) - - [databricks clusters restart - Restart cluster.](#databricks-clusters-restart---restart-cluster) - - [databricks clusters spark-versions - List available Spark versions.](#databricks-clusters-spark-versions---list-available-spark-versions) - - [databricks clusters start - Start terminated cluster.](#databricks-clusters-start---start-terminated-cluster) - - [databricks clusters unpin - Unpin cluster.](#databricks-clusters-unpin---unpin-cluster) -- [databricks account credentials - These commands manage credential configurations for this workspace.](#databricks-account-credentials---these-commands-manage-credential-configurations-for-this-workspace) - - [databricks account credentials create - Create credential configuration.](#databricks-account-credentials-create---create-credential-configuration) - - [databricks account credentials delete - Delete credential configuration.](#databricks-account-credentials-delete---delete-credential-configuration) - - [databricks account credentials get - Get credential configuration.](#databricks-account-credentials-get---get-credential-configuration) - - [databricks account credentials list - Get all credential configurations.](#databricks-account-credentials-list---get-all-credential-configurations) -- [databricks current-user - command allows retrieving information about currently authenticated user or service principal.](#databricks-current-user---command-allows-retrieving-information-about-currently-authenticated-user-or-service-principal) - - [databricks current-user me - Get current user info.](#databricks-current-user-me---get-current-user-info) -- [databricks account custom-app-integration - manage custom oauth app integrations.](#databricks-account-custom-app-integration---manage-custom-oauth-app-integrations) - - [databricks account custom-app-integration create - Create Custom OAuth App Integration.](#databricks-account-custom-app-integration-create---create-custom-oauth-app-integration) - - [databricks account custom-app-integration delete - Delete Custom OAuth App Integration.](#databricks-account-custom-app-integration-delete---delete-custom-oauth-app-integration) - - [databricks account custom-app-integration get - Get OAuth Custom App Integration.](#databricks-account-custom-app-integration-get---get-oauth-custom-app-integration) - - [databricks account custom-app-integration list - Get custom oauth app integrations.](#databricks-account-custom-app-integration-list---get-custom-oauth-app-integrations) - - [databricks account custom-app-integration update - Updates Custom OAuth App Integration.](#databricks-account-custom-app-integration-update---updates-custom-oauth-app-integration) -- [databricks dashboards - Databricks SQL Dashboards](#databricks-dashboards---databricks-sql-dashboards) - - [databricks dashboards create - Create a dashboard object.](#databricks-dashboards-create---create-a-dashboard-object) - - [databricks dashboards delete - Remove a 
dashboard.](#databricks-dashboards-delete---remove-a-dashboard) - - [databricks dashboards get - Retrieve a definition.](#databricks-dashboards-get---retrieve-a-definition) - - [databricks dashboards list - Get dashboard objects.](#databricks-dashboards-list---get-dashboard-objects) - - [databricks dashboards restore - Restore a dashboard.](#databricks-dashboards-restore---restore-a-dashboard) -- [databricks data-sources - command is provided to assist you in making new query objects.](#databricks-data-sources---command-is-provided-to-assist-you-in-making-new-query-objects) - - [databricks data-sources list - Get a list of SQL warehouses.](#databricks-data-sources-list---get-a-list-of-sql-warehouses) -- [databricks account encryption-keys - manage encryption key configurations.](#databricks-account-encryption-keys---manage-encryption-key-configurations) - - [databricks account encryption-keys create - Create encryption key configuration.](#databricks-account-encryption-keys-create---create-encryption-key-configuration) - - [databricks account encryption-keys delete - Delete encryption key configuration.](#databricks-account-encryption-keys-delete---delete-encryption-key-configuration) - - [databricks account encryption-keys get - Get encryption key configuration.](#databricks-account-encryption-keys-get---get-encryption-key-configuration) - - [databricks account encryption-keys list - Get all encryption key configurations.](#databricks-account-encryption-keys-list---get-all-encryption-key-configurations) -- [databricks experiments - Manage MLflow experiments](#databricks-experiments---manage-mlflow-experiments) - - [databricks experiments create-experiment - Create experiment.](#databricks-experiments-create-experiment---create-experiment) - - [databricks experiments create-run - Create a run.](#databricks-experiments-create-run---create-a-run) - - [databricks experiments delete-experiment - Delete an experiment.](#databricks-experiments-delete-experiment---delete-an-experiment) - - [databricks experiments delete-run - Delete a run.](#databricks-experiments-delete-run---delete-a-run) - - [databricks experiments delete-tag - Delete a tag.](#databricks-experiments-delete-tag---delete-a-tag) - - [databricks experiments get-by-name - Get metadata.](#databricks-experiments-get-by-name---get-metadata) - - [databricks experiments get-experiment - Get an experiment.](#databricks-experiments-get-experiment---get-an-experiment) - - [databricks experiments get-history - Get history of a given metric within a run.](#databricks-experiments-get-history---get-history-of-a-given-metric-within-a-run) - - [databricks experiments get-run - Get a run.](#databricks-experiments-get-run---get-a-run) - - [databricks experiments list-artifacts - Get all artifacts.](#databricks-experiments-list-artifacts---get-all-artifacts) - - [databricks experiments list-experiments - List experiments.](#databricks-experiments-list-experiments---list-experiments) - - [databricks experiments log-batch - Log a batch.](#databricks-experiments-log-batch---log-a-batch) - - [databricks experiments log-metric - Log a metric.](#databricks-experiments-log-metric---log-a-metric) - - [databricks experiments log-model - Log a model.](#databricks-experiments-log-model---log-a-model) - - [databricks experiments log-param - Log a param.](#databricks-experiments-log-param---log-a-param) - - [databricks experiments restore-experiment - Restores an experiment.](#databricks-experiments-restore-experiment---restores-an-experiment) - - [databricks 
experiments restore-run - Restore a run.](#databricks-experiments-restore-run---restore-a-run) - - [databricks experiments search-experiments - Search experiments.](#databricks-experiments-search-experiments---search-experiments) - - [databricks experiments search-runs - Search for runs.](#databricks-experiments-search-runs---search-for-runs) - - [databricks experiments set-experiment-tag - Set a tag.](#databricks-experiments-set-experiment-tag---set-a-tag) - - [databricks experiments set-tag - Set a tag.](#databricks-experiments-set-tag---set-a-tag) - - [databricks experiments update-experiment - Update an experiment.](#databricks-experiments-update-experiment---update-an-experiment) - - [databricks experiments update-run - Update a run.](#databricks-experiments-update-run---update-a-run) -- [databricks external-locations - manage cloud storage path with a storage credential that authorizes access to it.](#databricks-external-locations---manage-cloud-storage-path-with-a-storage-credential-that-authorizes-access-to-it) - - [databricks external-locations create - Create an external location.](#databricks-external-locations-create---create-an-external-location) - - [databricks external-locations delete - Delete an external location.](#databricks-external-locations-delete---delete-an-external-location) - - [databricks external-locations get - Get an external location.](#databricks-external-locations-get---get-an-external-location) - - [databricks external-locations list - List external locations.](#databricks-external-locations-list---list-external-locations) - - [databricks external-locations update - Update an external location.](#databricks-external-locations-update---update-an-external-location) -- [databricks functions - Functions implement User-Defined Functions UDFs in Unity Catalog.](#databricks-functions---functions-implement-user-defined-functions-udfs-in-unity-catalog) - - [databricks functions create - Create a function.](#databricks-functions-create---create-a-function) - - [databricks functions delete - Delete a function.](#databricks-functions-delete---delete-a-function) - - [databricks functions get - Get a function.](#databricks-functions-get---get-a-function) - - [databricks functions list - List functions.](#databricks-functions-list---list-functions) - - [databricks functions update - Update a function.](#databricks-functions-update---update-a-function) -- [databricks git-credentials - Registers personal access token for Databricks to do operations on behalf of the user.](#databricks-git-credentials---registers-personal-access-token-for-databricks-to-do-operations-on-behalf-of-the-user) - - [databricks git-credentials create - Create a credential entry.](#databricks-git-credentials-create---create-a-credential-entry) - - [databricks git-credentials delete - Delete a credential.](#databricks-git-credentials-delete---delete-a-credential) - - [databricks git-credentials get - Get a credential entry.](#databricks-git-credentials-get---get-a-credential-entry) - - [databricks git-credentials list - Get Git credentials.](#databricks-git-credentials-list---get-git-credentials) - - [databricks git-credentials update - Update a credential.](#databricks-git-credentials-update---update-a-credential) -- [databricks global-init-scripts - configure global initialization scripts for the workspace.](#databricks-global-init-scripts---configure-global-initialization-scripts-for-the-workspace) - - [databricks global-init-scripts create - Create init 
script.](#databricks-global-init-scripts-create---create-init-script) - - [databricks global-init-scripts delete - Delete init script.](#databricks-global-init-scripts-delete---delete-init-script) - - [databricks global-init-scripts get - Get an init script.](#databricks-global-init-scripts-get---get-an-init-script) - - [databricks global-init-scripts list - Get init scripts.](#databricks-global-init-scripts-list---get-init-scripts) - - [databricks global-init-scripts update - Update init script.](#databricks-global-init-scripts-update---update-init-script) -- [databricks grants - Manage data access in Unity Catalog.](#databricks-grants---manage-data-access-in-unity-catalog) - - [databricks grants get - Get permissions.](#databricks-grants-get---get-permissions) - - [databricks grants get-effective - Get effective permissions.](#databricks-grants-get-effective---get-effective-permissions) - - [databricks grants update - Update permissions.](#databricks-grants-update---update-permissions) -- [databricks groups - Groups for identity management.](#databricks-groups---groups-for-identity-management) - - [databricks groups create - Create a new group.](#databricks-groups-create---create-a-new-group) - - [databricks groups delete - Delete a group.](#databricks-groups-delete---delete-a-group) - - [databricks groups get - Get group details.](#databricks-groups-get---get-group-details) - - [databricks groups list - List group details.](#databricks-groups-list---list-group-details) - - [databricks groups patch - Update group details.](#databricks-groups-patch---update-group-details) - - [databricks groups update - Replace a group.](#databricks-groups-update---replace-a-group) -- [databricks account groups - Account-level group management](#databricks-account-groups---account-level-group-management) - - [databricks account groups create - Create a new group.](#databricks-account-groups-create---create-a-new-group) - - [databricks account groups delete - Delete a group.](#databricks-account-groups-delete---delete-a-group) - - [databricks account groups get - Get group details.](#databricks-account-groups-get---get-group-details) - - [databricks account groups list - List group details.](#databricks-account-groups-list---list-group-details) - - [databricks account groups patch - Update group details.](#databricks-account-groups-patch---update-group-details) - - [databricks account groups update - Replace a group.](#databricks-account-groups-update---replace-a-group) -- [databricks instance-pools - manage ready-to-use cloud instances which reduces a cluster start and auto-scaling times.](#databricks-instance-pools---manage-ready-to-use-cloud-instances-which-reduces-a-cluster-start-and-auto-scaling-times) - - [databricks instance-pools create - Create a new instance pool.](#databricks-instance-pools-create---create-a-new-instance-pool) - - [databricks instance-pools delete - Delete an instance pool.](#databricks-instance-pools-delete---delete-an-instance-pool) - - [databricks instance-pools edit - Edit an existing instance pool.](#databricks-instance-pools-edit---edit-an-existing-instance-pool) - - [databricks instance-pools get - Get instance pool information.](#databricks-instance-pools-get---get-instance-pool-information) - - [databricks instance-pools list - List instance pool info.](#databricks-instance-pools-list---list-instance-pool-info) -- [databricks instance-profiles - Manage instance profiles that users can launch clusters 
with.](#databricks-instance-profiles---manage-instance-profiles-that-users-can-launch-clusters-with) - - [databricks instance-profiles add - Register an instance profile.](#databricks-instance-profiles-add---register-an-instance-profile) - - [databricks instance-profiles edit - Edit an instance profile.](#databricks-instance-profiles-edit---edit-an-instance-profile) - - [databricks instance-profiles list - List available instance profiles.](#databricks-instance-profiles-list---list-available-instance-profiles) - - [databricks instance-profiles remove - Remove the instance profile.](#databricks-instance-profiles-remove---remove-the-instance-profile) -- [databricks ip-access-lists - enable admins to configure IP access lists.](#databricks-ip-access-lists---enable-admins-to-configure-ip-access-lists) - - [databricks ip-access-lists create - Create access list.](#databricks-ip-access-lists-create---create-access-list) - - [databricks ip-access-lists delete - Delete access list.](#databricks-ip-access-lists-delete---delete-access-list) - - [databricks ip-access-lists get - Get access list.](#databricks-ip-access-lists-get---get-access-list) - - [databricks ip-access-lists list - Get access lists.](#databricks-ip-access-lists-list---get-access-lists) - - [databricks ip-access-lists replace - Replace access list.](#databricks-ip-access-lists-replace---replace-access-list) - - [databricks ip-access-lists update - Update access list.](#databricks-ip-access-lists-update---update-access-list) -- [databricks account ip-access-lists - The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console.](#databricks-account-ip-access-lists---the-accounts-ip-access-list-api-enables-account-admins-to-configure-ip-access-lists-for-access-to-the-account-console) - - [databricks account ip-access-lists create - Create access list.](#databricks-account-ip-access-lists-create---create-access-list) - - [databricks account ip-access-lists delete - Delete access list.](#databricks-account-ip-access-lists-delete---delete-access-list) - - [databricks account ip-access-lists get - Get IP access list.](#databricks-account-ip-access-lists-get---get-ip-access-list) - - [databricks account ip-access-lists list - Get access lists.](#databricks-account-ip-access-lists-list---get-access-lists) - - [databricks account ip-access-lists replace - Replace access list.](#databricks-account-ip-access-lists-replace---replace-access-list) - - [databricks account ip-access-lists update - Update access list.](#databricks-account-ip-access-lists-update---update-access-list) -- [databricks jobs - Manage Databricks Workflows.](#databricks-jobs---manage-databricks-workflows) - - [databricks jobs cancel-all-runs - Cancel all runs of a job.](#databricks-jobs-cancel-all-runs---cancel-all-runs-of-a-job) - - [databricks jobs cancel-run - Cancel a job run.](#databricks-jobs-cancel-run---cancel-a-job-run) - - [databricks jobs create - Create a new job.](#databricks-jobs-create---create-a-new-job) - - [databricks jobs delete - Delete a job.](#databricks-jobs-delete---delete-a-job) - - [databricks jobs delete-run - Delete a job run.](#databricks-jobs-delete-run---delete-a-job-run) - - [databricks jobs export-run - Export and retrieve a job run.](#databricks-jobs-export-run---export-and-retrieve-a-job-run) - - [databricks jobs get - Get a single job.](#databricks-jobs-get---get-a-single-job) - - [databricks jobs get-run - Get a single job run.](#databricks-jobs-get-run---get-a-single-job-run) - - 
[databricks jobs get-run-output - Get the output for a single run.](#databricks-jobs-get-run-output---get-the-output-for-a-single-run) - - [databricks jobs list - List all jobs.](#databricks-jobs-list---list-all-jobs) - - [databricks jobs list-runs - List runs for a job.](#databricks-jobs-list-runs---list-runs-for-a-job) - - [databricks jobs repair-run - Repair a job run.](#databricks-jobs-repair-run---repair-a-job-run) - - [databricks jobs reset - Overwrites all settings for a job.](#databricks-jobs-reset---overwrites-all-settings-for-a-job) - - [databricks jobs run-now - Trigger a new job run.](#databricks-jobs-run-now---trigger-a-new-job-run) - - [databricks jobs submit - Create and trigger a one-time run.](#databricks-jobs-submit---create-and-trigger-a-one-time-run) - - [databricks jobs update - Partially updates a job.](#databricks-jobs-update---partially-updates-a-job) -- [databricks libraries - Manage libraries on a cluster.](#databricks-libraries---manage-libraries-on-a-cluster) - - [databricks libraries all-cluster-statuses - Get all statuses.](#databricks-libraries-all-cluster-statuses---get-all-statuses) - - [databricks libraries cluster-status - Get status.](#databricks-libraries-cluster-status---get-status) - - [databricks libraries install - Add a library.](#databricks-libraries-install---add-a-library) - - [databricks libraries uninstall - Uninstall libraries.](#databricks-libraries-uninstall---uninstall-libraries) -- [databricks account log-delivery - These commands manage log delivery configurations for this account.](#databricks-account-log-delivery---these-commands-manage-log-delivery-configurations-for-this-account) - - [databricks account log-delivery create - Create a new log delivery configuration.](#databricks-account-log-delivery-create---create-a-new-log-delivery-configuration) - - [databricks account log-delivery get - Get log delivery configuration.](#databricks-account-log-delivery-get---get-log-delivery-configuration) - - [databricks account log-delivery list - Get all log delivery configurations.](#databricks-account-log-delivery-list---get-all-log-delivery-configurations) - - [databricks account log-delivery patch-status - Enable or disable log delivery configuration.](#databricks-account-log-delivery-patch-status---enable-or-disable-log-delivery-configuration) -- [databricks account metastore-assignments - These commands manage metastore assignments to a workspace.](#databricks-account-metastore-assignments---these-commands-manage-metastore-assignments-to-a-workspace) - - [databricks account metastore-assignments create - Assigns a workspace to a metastore.](#databricks-account-metastore-assignments-create---assigns-a-workspace-to-a-metastore) - - [databricks account metastore-assignments delete - Delete a metastore assignment.](#databricks-account-metastore-assignments-delete---delete-a-metastore-assignment) - - [databricks account metastore-assignments get - Gets the metastore assignment for a workspace.](#databricks-account-metastore-assignments-get---gets-the-metastore-assignment-for-a-workspace) - - [databricks account metastore-assignments list - Get all workspaces assigned to a metastore.](#databricks-account-metastore-assignments-list---get-all-workspaces-assigned-to-a-metastore) - - [databricks account metastore-assignments update - Updates a metastore assignment to a workspaces.](#databricks-account-metastore-assignments-update---updates-a-metastore-assignment-to-a-workspaces) -- [databricks metastores - Manage metastores in Unity 
Catalog.](#databricks-metastores---manage-metastores-in-unity-catalog) - - [databricks metastores assign - Create an assignment.](#databricks-metastores-assign---create-an-assignment) - - [databricks metastores create - Create a metastore.](#databricks-metastores-create---create-a-metastore) - - [databricks metastores current - Get metastore assignment for workspace.](#databricks-metastores-current---get-metastore-assignment-for-workspace) - - [databricks metastores delete - Delete a metastore.](#databricks-metastores-delete---delete-a-metastore) - - [databricks metastores get - Get a metastore.](#databricks-metastores-get---get-a-metastore) - - [databricks metastores list - List metastores.](#databricks-metastores-list---list-metastores) - - [databricks metastores maintenance - Enables or disables auto maintenance on the metastore.](#databricks-metastores-maintenance---enables-or-disables-auto-maintenance-on-the-metastore) - - [databricks metastores summary - Get a metastore summary.](#databricks-metastores-summary---get-a-metastore-summary) - - [databricks metastores unassign - Delete an assignment.](#databricks-metastores-unassign---delete-an-assignment) - - [databricks metastores update - Update a metastore.](#databricks-metastores-update---update-a-metastore) - - [databricks metastores update-assignment - Update an assignment.](#databricks-metastores-update-assignment---update-an-assignment) -- [databricks account metastores - These commands manage Unity Catalog metastores for an account.](#databricks-account-metastores---these-commands-manage-unity-catalog-metastores-for-an-account) - - [databricks account metastores create - Create metastore.](#databricks-account-metastores-create---create-metastore) - - [databricks account metastores delete - Delete a metastore.](#databricks-account-metastores-delete---delete-a-metastore) - - [databricks account metastores get - Get a metastore.](#databricks-account-metastores-get---get-a-metastore) - - [databricks account metastores list - Get all metastores associated with an account.](#databricks-account-metastores-list---get-all-metastores-associated-with-an-account) - - [databricks account metastores update - Update a metastore.](#databricks-account-metastores-update---update-a-metastore) -- [databricks model-registry - Expose commands for Model Registry.](#databricks-model-registry---expose-commands-for-model-registry) - - [databricks model-registry approve-transition-request - Approve transition request.](#databricks-model-registry-approve-transition-request---approve-transition-request) - - [databricks model-registry create-comment - Post a comment.](#databricks-model-registry-create-comment---post-a-comment) - - [databricks model-registry create-model - Create a model.](#databricks-model-registry-create-model---create-a-model) - - [databricks model-registry create-model-version - Create a model version.](#databricks-model-registry-create-model-version---create-a-model-version) - - [databricks model-registry create-transition-request - Make a transition request.](#databricks-model-registry-create-transition-request---make-a-transition-request) - - [databricks model-registry create-webhook - Create a webhook.](#databricks-model-registry-create-webhook---create-a-webhook) - - [databricks model-registry delete-comment - Delete a comment.](#databricks-model-registry-delete-comment---delete-a-comment) - - [databricks model-registry delete-model - Delete a model.](#databricks-model-registry-delete-model---delete-a-model) - - [databricks 
model-registry delete-model-tag - Delete a model tag.](#databricks-model-registry-delete-model-tag---delete-a-model-tag) - - [databricks model-registry delete-model-version - Delete a model version.](#databricks-model-registry-delete-model-version---delete-a-model-version) - - [databricks model-registry delete-model-version-tag - Delete a model version tag.](#databricks-model-registry-delete-model-version-tag---delete-a-model-version-tag) - - [databricks model-registry delete-transition-request - Delete a ransition request.](#databricks-model-registry-delete-transition-request---delete-a-ransition-request) - - [databricks model-registry delete-webhook - Delete a webhook.](#databricks-model-registry-delete-webhook---delete-a-webhook) - - [databricks model-registry get-latest-versions - Get the latest version.](#databricks-model-registry-get-latest-versions---get-the-latest-version) - - [databricks model-registry get-model - Get model.](#databricks-model-registry-get-model---get-model) - - [databricks model-registry get-model-version - Get a model version.](#databricks-model-registry-get-model-version---get-a-model-version) - - [databricks model-registry get-model-version-download-uri - Get a model version URI.](#databricks-model-registry-get-model-version-download-uri---get-a-model-version-uri) - - [databricks model-registry list-models - List models.](#databricks-model-registry-list-models---list-models) - - [databricks model-registry list-transition-requests - List transition requests.](#databricks-model-registry-list-transition-requests---list-transition-requests) - - [databricks model-registry list-webhooks - List registry webhooks.](#databricks-model-registry-list-webhooks---list-registry-webhooks) - - [databricks model-registry reject-transition-request - Reject a transition request.](#databricks-model-registry-reject-transition-request---reject-a-transition-request) - - [databricks model-registry rename-model - Rename a model.](#databricks-model-registry-rename-model---rename-a-model) - - [databricks model-registry search-model-versions - Searches model versions.](#databricks-model-registry-search-model-versions---searches-model-versions) - - [databricks model-registry search-models - Search models.](#databricks-model-registry-search-models---search-models) - - [databricks model-registry set-model-tag - Set a tag.](#databricks-model-registry-set-model-tag---set-a-tag) - - [databricks model-registry set-model-version-tag - Set a version tag.](#databricks-model-registry-set-model-version-tag---set-a-version-tag) - - [databricks model-registry test-registry-webhook - Test a webhook.](#databricks-model-registry-test-registry-webhook---test-a-webhook) - - [databricks model-registry transition-stage - Transition a stage.](#databricks-model-registry-transition-stage---transition-a-stage) - - [databricks model-registry update-comment - Update a comment.](#databricks-model-registry-update-comment---update-a-comment) - - [databricks model-registry update-model - Update model.](#databricks-model-registry-update-model---update-model) - - [databricks model-registry update-model-version - Update model version.](#databricks-model-registry-update-model-version---update-model-version) - - [databricks model-registry update-webhook - Update a webhook.](#databricks-model-registry-update-webhook---update-a-webhook) -- [databricks account networks - Manage network configurations.](#databricks-account-networks---manage-network-configurations) - - [databricks account networks create - Create network 
configuration.](#databricks-account-networks-create---create-network-configuration) - - [databricks account networks delete - Delete a network configuration.](#databricks-account-networks-delete---delete-a-network-configuration) - - [databricks account networks get - Get a network configuration.](#databricks-account-networks-get---get-a-network-configuration) - - [databricks account networks list - Get all network configurations.](#databricks-account-networks-list---get-all-network-configurations) -- [databricks account o-auth-enrollment - These commands enable administrators to enroll OAuth for their accounts, which is required for adding/using any OAuth published/custom application integration.](#databricks-account-o-auth-enrollment---these-commands-enable-administrators-to-enroll-oauth-for-their-accounts-which-is-required-for-addingusing-any-oauth-publishedcustom-application-integration) - - [databricks account o-auth-enrollment create - Create OAuth Enrollment request.](#databricks-account-o-auth-enrollment-create---create-oauth-enrollment-request) - - [databricks account o-auth-enrollment get - Get OAuth enrollment status.](#databricks-account-o-auth-enrollment-get---get-oauth-enrollment-status) -- [databricks permissions - Manage access for various users on different objects and endpoints.](#databricks-permissions---manage-access-for-various-users-on-different-objects-and-endpoints) - - [databricks permissions get - Get object permissions.](#databricks-permissions-get---get-object-permissions) - - [databricks permissions get-permission-levels - Get permission levels.](#databricks-permissions-get-permission-levels---get-permission-levels) - - [databricks permissions set - Set permissions.](#databricks-permissions-set---set-permissions) - - [databricks permissions update - Update permission.](#databricks-permissions-update---update-permission) -- [databricks pipelines - Manage Delta Live Tables from command-line.](#databricks-pipelines---manage-delta-live-tables-from-command-line) - - [databricks pipelines create - Create a pipeline.](#databricks-pipelines-create---create-a-pipeline) - - [databricks pipelines delete - Delete a pipeline.](#databricks-pipelines-delete---delete-a-pipeline) - - [databricks pipelines get - Get a pipeline.](#databricks-pipelines-get---get-a-pipeline) - - [databricks pipelines get-update - Get a pipeline update.](#databricks-pipelines-get-update---get-a-pipeline-update) - - [databricks pipelines list-pipeline-events - List pipeline events.](#databricks-pipelines-list-pipeline-events---list-pipeline-events) - - [databricks pipelines list-pipelines - List pipelines.](#databricks-pipelines-list-pipelines---list-pipelines) - - [databricks pipelines list-updates - List pipeline updates.](#databricks-pipelines-list-updates---list-pipeline-updates) - - [databricks pipelines reset - Reset a pipeline.](#databricks-pipelines-reset---reset-a-pipeline) - - [databricks pipelines start-update - Queue a pipeline update.](#databricks-pipelines-start-update---queue-a-pipeline-update) - - [databricks pipelines stop - Stop a pipeline.](#databricks-pipelines-stop---stop-a-pipeline) - - [databricks pipelines update - Edit a pipeline.](#databricks-pipelines-update---edit-a-pipeline) -- [databricks policy-families - View available policy families.](#databricks-policy-families---view-available-policy-families) - - [databricks policy-families get - get cluster policy family.](#databricks-policy-families-get---get-cluster-policy-family) - - [databricks policy-families list - list 
policy families.](#databricks-policy-families-list---list-policy-families) -- [databricks account private-access - PrivateLink settings.](#databricks-account-private-access---privatelink-settings) - - [databricks account private-access create - Create private access settings.](#databricks-account-private-access-create---create-private-access-settings) - - [databricks account private-access delete - Delete a private access settings object.](#databricks-account-private-access-delete---delete-a-private-access-settings-object) - - [databricks account private-access get - Get a private access settings object.](#databricks-account-private-access-get---get-a-private-access-settings-object) - - [databricks account private-access list - Get all private access settings objects.](#databricks-account-private-access-list---get-all-private-access-settings-objects) - - [databricks account private-access replace - Replace private access settings.](#databricks-account-private-access-replace---replace-private-access-settings) -- [databricks providers - Delta Sharing Providers commands.](#databricks-providers---delta-sharing-providers-commands) - - [databricks providers create - Create an auth provider.](#databricks-providers-create---create-an-auth-provider) - - [databricks providers delete - Delete a provider.](#databricks-providers-delete---delete-a-provider) - - [databricks providers get - Get a provider.](#databricks-providers-get---get-a-provider) - - [databricks providers list - List providers.](#databricks-providers-list---list-providers) - - [databricks providers list-shares - List shares by Provider.](#databricks-providers-list-shares---list-shares-by-provider) - - [databricks providers update - Update a provider.](#databricks-providers-update---update-a-provider) -- [databricks account published-app-integration - manage published OAuth app integrations like Tableau Cloud for Databricks in AWS cloud.](#databricks-account-published-app-integration---manage-published-oauth-app-integrations-like-tableau-cloud-for-databricks-in-aws-cloud) - - [databricks account published-app-integration create - Create Published OAuth App Integration.](#databricks-account-published-app-integration-create---create-published-oauth-app-integration) - - [databricks account published-app-integration delete - Delete Published OAuth App Integration.](#databricks-account-published-app-integration-delete---delete-published-oauth-app-integration) - - [databricks account published-app-integration get - Get OAuth Published App Integration.](#databricks-account-published-app-integration-get---get-oauth-published-app-integration) - - [databricks account published-app-integration list - Get published oauth app integrations.](#databricks-account-published-app-integration-list---get-published-oauth-app-integrations) - - [databricks account published-app-integration update - Updates Published OAuth App Integration.](#databricks-account-published-app-integration-update---updates-published-oauth-app-integration) -- [databricks queries - These endpoints are used for CRUD operations on query definitions.](#databricks-queries---these-endpoints-are-used-for-crud-operations-on-query-definitions) - - [databricks queries create - Create a new query definition.](#databricks-queries-create---create-a-new-query-definition) - - [databricks queries delete - Delete a query.](#databricks-queries-delete---delete-a-query) - - [databricks queries get - Get a query definition.](#databricks-queries-get---get-a-query-definition) - - [databricks queries list 
- Get a list of queries.](#databricks-queries-list---get-a-list-of-queries) - - [databricks queries restore - Restore a query.](#databricks-queries-restore---restore-a-query) - - [databricks queries update - Change a query definition.](#databricks-queries-update---change-a-query-definition) -- [databricks query-history - Access the history of queries through SQL warehouses.](#databricks-query-history---access-the-history-of-queries-through-sql-warehouses) - - [databricks query-history list - List Queries.](#databricks-query-history-list---list-queries) -- [databricks recipient-activation - Delta Sharing recipient activation commands.](#databricks-recipient-activation---delta-sharing-recipient-activation-commands) - - [databricks recipient-activation get-activation-url-info - Get a share activation URL.](#databricks-recipient-activation-get-activation-url-info---get-a-share-activation-url) - - [databricks recipient-activation retrieve-token - Get an access token.](#databricks-recipient-activation-retrieve-token---get-an-access-token) -- [databricks recipients - Delta Sharing recipients.](#databricks-recipients---delta-sharing-recipients) - - [databricks recipients create - Create a share recipient.](#databricks-recipients-create---create-a-share-recipient) - - [databricks recipients delete - Delete a share recipient.](#databricks-recipients-delete---delete-a-share-recipient) - - [databricks recipients get - Get a share recipient.](#databricks-recipients-get---get-a-share-recipient) - - [databricks recipients list - List share recipients.](#databricks-recipients-list---list-share-recipients) - - [databricks recipients rotate-token - Rotate a token.](#databricks-recipients-rotate-token---rotate-a-token) - - [databricks recipients share-permissions - Get recipient share permissions.](#databricks-recipients-share-permissions---get-recipient-share-permissions) - - [databricks recipients update - Update a share recipient.](#databricks-recipients-update---update-a-share-recipient) -- [databricks repos - Manage their git repos.](#databricks-repos---manage-their-git-repos) - - [databricks repos create - Create a repo.](#databricks-repos-create---create-a-repo) - - [databricks repos delete - Delete a repo.](#databricks-repos-delete---delete-a-repo) - - [databricks repos get - Get a repo.](#databricks-repos-get---get-a-repo) - - [databricks repos list - Get repos.](#databricks-repos-list---get-repos) - - [databricks repos update - Update a repo.](#databricks-repos-update---update-a-repo) -- [databricks schemas - Manage schemas in Unity Catalog.](#databricks-schemas---manage-schemas-in-unity-catalog) - - [databricks schemas create - Create a schema.](#databricks-schemas-create---create-a-schema) - - [databricks schemas delete - Delete a schema.](#databricks-schemas-delete---delete-a-schema) - - [databricks schemas get - Get a schema.](#databricks-schemas-get---get-a-schema) - - [databricks schemas list - List schemas.](#databricks-schemas-list---list-schemas) - - [databricks schemas update - Update a schema.](#databricks-schemas-update---update-a-schema) -- [databricks secrets - manage secrets, secret scopes, and access permissions.](#databricks-secrets---manage-secrets-secret-scopes-and-access-permissions) - - [databricks secrets create-scope - Create a new secret scope.](#databricks-secrets-create-scope---create-a-new-secret-scope) - - [databricks secrets delete-acl - Delete an ACL.](#databricks-secrets-delete-acl---delete-an-acl) - - [databricks secrets delete-scope - Delete a secret 
scope.](#databricks-secrets-delete-scope---delete-a-secret-scope) - - [databricks secrets delete-secret - Delete a secret.](#databricks-secrets-delete-secret---delete-a-secret) - - [databricks secrets get-acl - Get secret ACL details.](#databricks-secrets-get-acl---get-secret-acl-details) - - [databricks secrets list-acls - Lists ACLs.](#databricks-secrets-list-acls---lists-acls) - - [databricks secrets list-scopes - List all scopes.](#databricks-secrets-list-scopes---list-all-scopes) - - [databricks secrets list-secrets - List secret keys.](#databricks-secrets-list-secrets---list-secret-keys) - - [databricks secrets put-acl - Create/update an ACL.](#databricks-secrets-put-acl---createupdate-an-acl) - - [databricks secrets put-secret - Add a secret.](#databricks-secrets-put-secret---add-a-secret) -- [databricks service-principals - Manage service principals.](#databricks-service-principals---manage-service-principals) - - [databricks service-principals create - Create a service principal.](#databricks-service-principals-create---create-a-service-principal) - - [databricks service-principals delete - Delete a service principal.](#databricks-service-principals-delete---delete-a-service-principal) - - [databricks service-principals get - Get service principal details.](#databricks-service-principals-get---get-service-principal-details) - - [databricks service-principals list - List service principals.](#databricks-service-principals-list---list-service-principals) - - [databricks service-principals patch - Update service principal details.](#databricks-service-principals-patch---update-service-principal-details) - - [databricks service-principals update - Replace service principal.](#databricks-service-principals-update---replace-service-principal) -- [databricks account service-principals - Manage service principals on the account level.](#databricks-account-service-principals---manage-service-principals-on-the-account-level) - - [databricks account service-principals create - Create a service principal.](#databricks-account-service-principals-create---create-a-service-principal) - - [databricks account service-principals delete - Delete a service principal.](#databricks-account-service-principals-delete---delete-a-service-principal) - - [databricks account service-principals get - Get service principal details.](#databricks-account-service-principals-get---get-service-principal-details) - - [databricks account service-principals list - List service principals.](#databricks-account-service-principals-list---list-service-principals) - - [databricks account service-principals patch - Update service principal details.](#databricks-account-service-principals-patch---update-service-principal-details) - - [databricks account service-principals update - Replace service principal.](#databricks-account-service-principals-update---replace-service-principal) -- [databricks serving-endpoints - Manage model serving endpoints.](#databricks-serving-endpoints---manage-model-serving-endpoints) - - [databricks serving-endpoints build-logs - Retrieve the logs associated with building the model's environment for a given serving endpoint's served model.](#databricks-serving-endpoints-build-logs---retrieve-the-logs-associated-with-building-the-models-environment-for-a-given-serving-endpoints-served-model) - - [databricks serving-endpoints create - Create a new serving endpoint.](#databricks-serving-endpoints-create---create-a-new-serving-endpoint) - - [databricks serving-endpoints delete - Delete a serving 
endpoint.](#databricks-serving-endpoints-delete---delete-a-serving-endpoint) - - [databricks serving-endpoints export-metrics - Retrieve the metrics corresponding to a serving endpoint for the current time in Prometheus or OpenMetrics exposition format.](#databricks-serving-endpoints-export-metrics---retrieve-the-metrics-corresponding-to-a-serving-endpoint-for-the-current-time-in-prometheus-or-openmetrics-exposition-format) - - [databricks serving-endpoints get - Get a single serving endpoint.](#databricks-serving-endpoints-get---get-a-single-serving-endpoint) - - [databricks serving-endpoints list - Retrieve all serving endpoints.](#databricks-serving-endpoints-list---retrieve-all-serving-endpoints) - - [databricks serving-endpoints logs - Retrieve the most recent log lines associated with a given serving endpoint's served model.](#databricks-serving-endpoints-logs---retrieve-the-most-recent-log-lines-associated-with-a-given-serving-endpoints-served-model) - - [databricks serving-endpoints query - Query a serving endpoint with provided model input.](#databricks-serving-endpoints-query---query-a-serving-endpoint-with-provided-model-input) - - [databricks serving-endpoints update-config - Update a serving endpoint with a new config.](#databricks-serving-endpoints-update-config---update-a-serving-endpoint-with-a-new-config) -- [databricks shares - Databricks Shares commands.](#databricks-shares---databricks-shares-commands) - - [databricks shares create - Create a share.](#databricks-shares-create---create-a-share) - - [databricks shares delete - Delete a share.](#databricks-shares-delete---delete-a-share) - - [databricks shares get - Get a share.](#databricks-shares-get---get-a-share) - - [databricks shares list - List shares.](#databricks-shares-list---list-shares) - - [databricks shares share-permissions - Get permissions.](#databricks-shares-share-permissions---get-permissions) - - [databricks shares update - Update a share.](#databricks-shares-update---update-a-share) - - [databricks shares update-permissions - Update permissions.](#databricks-shares-update-permissions---update-permissions) -- [databricks account storage - Manage storage configurations for this workspace.](#databricks-account-storage---manage-storage-configurations-for-this-workspace) - - [databricks account storage create - Create new storage configuration.](#databricks-account-storage-create---create-new-storage-configuration) - - [databricks account storage delete - Delete storage configuration.](#databricks-account-storage-delete---delete-storage-configuration) - - [databricks account storage get - Get storage configuration.](#databricks-account-storage-get---get-storage-configuration) - - [databricks account storage list - Get all storage configurations.](#databricks-account-storage-list---get-all-storage-configurations) -- [databricks storage-credentials - Manage storage credentials for Unity Catalog.](#databricks-storage-credentials---manage-storage-credentials-for-unity-catalog) - - [databricks storage-credentials create - Create a storage credential.](#databricks-storage-credentials-create---create-a-storage-credential) - - [databricks storage-credentials delete - Delete a credential.](#databricks-storage-credentials-delete---delete-a-credential) - - [databricks storage-credentials get - Get a credential.](#databricks-storage-credentials-get---get-a-credential) - - [databricks storage-credentials list - List credentials.](#databricks-storage-credentials-list---list-credentials) - - [databricks 
storage-credentials update - Update a credential.](#databricks-storage-credentials-update---update-a-credential) - - [databricks storage-credentials validate - Validate a storage credential.](#databricks-storage-credentials-validate---validate-a-storage-credential) -- [databricks account storage-credentials - These commands manage storage credentials for a particular metastore.](#databricks-account-storage-credentials---these-commands-manage-storage-credentials-for-a-particular-metastore) - - [databricks account storage-credentials create - Create a storage credential.](#databricks-account-storage-credentials-create---create-a-storage-credential) - - [databricks account storage-credentials get - Gets the named storage credential.](#databricks-account-storage-credentials-get---gets-the-named-storage-credential) - - [databricks account storage-credentials list - Get all storage credentials assigned to a metastore.](#databricks-account-storage-credentials-list---get-all-storage-credentials-assigned-to-a-metastore) -- [databricks table-constraints - Primary key and foreign key constraints encode relationships between fields in tables.](#databricks-table-constraints---primary-key-and-foreign-key-constraints-encode-relationships-between-fields-in-tables) - - [databricks table-constraints create - Create a table constraint.](#databricks-table-constraints-create---create-a-table-constraint) - - [databricks table-constraints delete - Delete a table constraint.](#databricks-table-constraints-delete---delete-a-table-constraint) -- [databricks tables - A table resides in the third layer of Unity Catalog’s three-level namespace.](#databricks-tables---a-table-resides-in-the-third-layer-of-unity-catalogs-three-level-namespace) - - [databricks tables delete - Delete a table.](#databricks-tables-delete---delete-a-table) - - [databricks tables get - Get a table.](#databricks-tables-get---get-a-table) - - [databricks tables list - List tables.](#databricks-tables-list---list-tables) - - [databricks tables list-summaries - List table summaries.](#databricks-tables-list-summaries---list-table-summaries) -- [databricks token-management - Enables administrators to get all tokens and delete tokens for other users.](#databricks-token-management---enables-administrators-to-get-all-tokens-and-delete-tokens-for-other-users) - - [databricks token-management create-obo-token - Create on-behalf token.](#databricks-token-management-create-obo-token---create-on-behalf-token) - - [databricks token-management delete - Delete a token.](#databricks-token-management-delete---delete-a-token) - - [databricks token-management get - Get token info.](#databricks-token-management-get---get-token-info) - - [databricks token-management list - List all tokens.](#databricks-token-management-list---list-all-tokens) -- [databricks tokens - The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks commandss.](#databricks-tokens---the-token-api-allows-you-to-create-list-and-revoke-tokens-that-can-be-used-to-authenticate-and-access-databricks-commandss) - - [databricks tokens create - Create a user token.](#databricks-tokens-create---create-a-user-token) - - [databricks tokens delete - Revoke token.](#databricks-tokens-delete---revoke-token) - - [databricks tokens list - List tokens.](#databricks-tokens-list---list-tokens) -- [databricks users - Manage users on the workspace-level.](#databricks-users---manage-users-on-the-workspace-level) - - [databricks users create - Create a new 
user.](#databricks-users-create---create-a-new-user) - - [databricks users delete - Delete a user.](#databricks-users-delete---delete-a-user) - - [databricks users get - Get user details.](#databricks-users-get---get-user-details) - - [databricks users list - List users.](#databricks-users-list---list-users) - - [databricks users patch - Update user details.](#databricks-users-patch---update-user-details) - - [databricks users update - Replace a user.](#databricks-users-update---replace-a-user) -- [databricks account users - Manage users on the accou](#databricks-account-users---manage-users-on-the-accou) - - [databricks account users create - Create a new user.](#databricks-account-users-create---create-a-new-user) - - [databricks account users delete - Delete a user.](#databricks-account-users-delete---delete-a-user) - - [databricks account users get - Get user details.](#databricks-account-users-get---get-user-details) - - [databricks account users list - List users.](#databricks-account-users-list---list-users) - - [databricks account users patch - Update user details.](#databricks-account-users-patch---update-user-details) - - [databricks account users update - Replace a user.](#databricks-account-users-update---replace-a-user) -- [databricks account vpc-endpoints - Manage VPC endpoints.](#databricks-account-vpc-endpoints---manage-vpc-endpoints) - - [databricks account vpc-endpoints create - Create VPC endpoint configuration.](#databricks-account-vpc-endpoints-create---create-vpc-endpoint-configuration) - - [databricks account vpc-endpoints delete - Delete VPC endpoint configuration.](#databricks-account-vpc-endpoints-delete---delete-vpc-endpoint-configuration) - - [databricks account vpc-endpoints get - Get a VPC endpoint configuration.](#databricks-account-vpc-endpoints-get---get-a-vpc-endpoint-configuration) - - [databricks account vpc-endpoints list - Get all VPC endpoint configurations.](#databricks-account-vpc-endpoints-list---get-all-vpc-endpoint-configurations) -- [databricks warehouses - Manage Databricks SQL warehouses.](#databricks-warehouses---manage-databricks-sql-warehouses) - - [databricks warehouses create - Create a warehouse.](#databricks-warehouses-create---create-a-warehouse) - - [databricks warehouses delete - Delete a warehouse.](#databricks-warehouses-delete---delete-a-warehouse) - - [databricks warehouses edit - Update a warehouse.](#databricks-warehouses-edit---update-a-warehouse) - - [databricks warehouses get - Get warehouse info.](#databricks-warehouses-get---get-warehouse-info) - - [databricks warehouses get-workspace-warehouse-config - Get the workspace configuration.](#databricks-warehouses-get-workspace-warehouse-config---get-the-workspace-configuration) - - [databricks warehouses list - List warehouses.](#databricks-warehouses-list---list-warehouses) - - [databricks warehouses set-workspace-warehouse-config - Set the workspace configuration.](#databricks-warehouses-set-workspace-warehouse-config---set-the-workspace-configuration) - - [databricks warehouses start - Start a warehouse.](#databricks-warehouses-start---start-a-warehouse) - - [databricks warehouses stop - Stop a warehouse.](#databricks-warehouses-stop---stop-a-warehouse) -- [databricks workspace - The Workspace API allows you to list, import, export, and delete notebooks and folders.](#databricks-workspace---the-workspace-api-allows-you-to-list-import-export-and-delete-notebooks-and-folders) - - [databricks workspace delete - Delete a workspace 
object.](#databricks-workspace-delete---delete-a-workspace-object) - - [databricks workspace export - Export a workspace object.](#databricks-workspace-export---export-a-workspace-object) - - [databricks workspace get-status - Get status.](#databricks-workspace-get-status---get-status) - - [databricks workspace import - Import a workspace object.](#databricks-workspace-import---import-a-workspace-object) - - [databricks workspace list - List contents.](#databricks-workspace-list---list-contents) - - [databricks workspace mkdirs - Create a directory.](#databricks-workspace-mkdirs---create-a-directory) -- [databricks account workspace-assignment - The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account.](#databricks-account-workspace-assignment---the-workspace-permission-assignment-api-allows-you-to-manage-workspace-permissions-for-principals-in-your-account) - - [databricks account workspace-assignment delete - Delete permissions assignment.](#databricks-account-workspace-assignment-delete---delete-permissions-assignment) - - [databricks account workspace-assignment get - List workspace permissions.](#databricks-account-workspace-assignment-get---list-workspace-permissions) - - [databricks account workspace-assignment list - Get permission assignments.](#databricks-account-workspace-assignment-list---get-permission-assignments) - - [databricks account workspace-assignment update - Create or update permissions assignment.](#databricks-account-workspace-assignment-update---create-or-update-permissions-assignment) -- [databricks workspace-conf - command allows updating known workspace settings for advanced users.](#databricks-workspace-conf---command-allows-updating-known-workspace-settings-for-advanced-users) - - [databricks workspace-conf get-status - Check configuration status.](#databricks-workspace-conf-get-status---check-configuration-status) - - [databricks workspace-conf set-status - Enable/disable features.](#databricks-workspace-conf-set-status---enabledisable-features) -- [databricks account workspaces - These commands manage workspaces for this account.](#databricks-account-workspaces---these-commands-manage-workspaces-for-this-account) - - [databricks account workspaces create - Create a new workspace.](#databricks-account-workspaces-create---create-a-new-workspace) - - [databricks account workspaces delete - Delete a workspace.](#databricks-account-workspaces-delete---delete-a-workspace) - - [databricks account workspaces get - Get a workspace.](#databricks-account-workspaces-get---get-a-workspace) - - [databricks account workspaces list - Get all workspaces.](#databricks-account-workspaces-list---get-all-workspaces) - - [databricks account workspaces update - Update workspace configuration.](#databricks-account-workspaces-update---update-workspace-configuration) - - -## `databricks alerts` - The alerts API can be used to perform CRUD operations on alerts. - -The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL -object that periodically runs a query, evaluates a condition of its result, and notifies one -or more users and/or notification destinations if the condition was met. - -### `databricks alerts create` - Create an alert. - -An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, -and notifies users or notification destinations if the condition was met. 
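
A minimal sketch, assuming the alert definition (name, query, trigger condition) is kept in a local `alert.json` request-body file; the file name is a placeholder, not part of the CLI:

```
# Create an alert from a request-body file and re-arm it 300 seconds after it triggers.
# alert.json is a placeholder; it must contain the alert definition expected by the API.
databricks alerts create --json @alert.json --rearm 300
```
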
Flags:
 * `--json` - either inline JSON string or @path/to/file.json with request body
 * `--parent` - The identifier of the workspace folder containing the alert.
 * `--rearm` - Number of seconds after being triggered before the alert rearms itself and can be triggered again.

### `databricks alerts delete` - Delete an alert.

Deletes an alert. Deleted alerts are no longer accessible and cannot be restored.
**Note:** Unlike queries and dashboards, alerts cannot be moved to the trash.

### `databricks alerts get` - Get an alert.

Gets an alert.

### `databricks alerts list` - Get alerts.

Gets a list of alerts.

### `databricks alerts update` - Update an alert.

Updates an alert.

Flags:
 * `--json` - either inline JSON string or @path/to/file.json with request body
 * `--rearm` - Number of seconds after being triggered before the alert rearms itself and can be triggered again.

## `databricks catalogs` - A catalog is the first layer of Unity Catalog’s three-level namespace.

A catalog is the first layer of Unity Catalog’s three-level namespace. It’s used to organize
your data assets. Users can see all catalogs on which they have been assigned the USE_CATALOG
data permission.

In Unity Catalog, admins and data stewards manage users and their access to data centrally
across all of the workspaces in a Databricks account. Users in different workspaces can
share access to the same data, depending on privileges granted centrally in Unity Catalog.

### `databricks catalogs create` - Create a catalog.

Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege.

Flags:
 * `--json` - either inline JSON string or @path/to/file.json with request body
 * `--comment` - User-provided free-form text description.
 * `--provider-name` - The name of delta sharing provider.
 * `--share-name` - The name of the share under the share provider.
 * `--storage-root` - Storage root URL for managed tables within catalog.

### `databricks catalogs delete` - Delete a catalog.

Deletes the catalog that matches the supplied name. The caller must be a metastore admin or the owner of the catalog.

Flags:
 * `--force` - Force deletion even if the catalog is not empty.

### `databricks catalogs get` - Get a catalog.

Gets the specified catalog in a metastore. The caller must be a metastore admin, the owner of the catalog, or a user that has the **USE_CATALOG** privilege set for their account.

### `databricks catalogs list` - List catalogs.

Gets an array of catalogs in the metastore.
If the caller is the metastore admin, all catalogs will be retrieved.
Otherwise, only catalogs owned by the caller (or for which the caller has the **USE_CATALOG** privilege) will be retrieved.
There is no guarantee of a specific ordering of the elements in the array.

### `databricks catalogs update` - Update a catalog.

Updates the catalog that matches the supplied name.
The caller must be either the owner of the catalog, or a metastore admin (when changing the owner field of the catalog).

Flags:
 * `--json` - either inline JSON string or @path/to/file.json with request body
 * `--comment` - User-provided free-form text description.
 * `--name` - Name of catalog.
 * `--owner` - Username of current owner of catalog.
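
A quick sketch of the catalog commands above; the catalog name and comment are hypothetical, and the exact argument form may differ by CLI version (see `databricks catalogs create --help`):

```
# Create a catalog named "sandbox" with a free-form comment (illustrative values).
databricks catalogs create sandbox --comment "Scratch catalog for ad-hoc experiments"

# List the catalogs visible to the caller in the current metastore.
databricks catalogs list
```
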
## `databricks cluster-policies` - Cluster policy limits the ability to configure clusters based on a set of rules.

Cluster policy limits the ability to configure clusters based on a set of rules. The policy
rules limit the attributes or attribute values available for cluster creation. Cluster
policies have ACLs that limit their use to specific users and groups.

Cluster policies let you limit users to creating clusters with prescribed settings, simplify
the user interface, enable more users to create their own clusters (by fixing and hiding
some values), and control cost by limiting the per-cluster maximum cost (by setting limits on
attributes whose values contribute to the hourly price).

Cluster policy permissions limit which policies a user can select in the Policy drop-down
when the user creates a cluster:
- A user who has cluster create permission can select the Unrestricted policy and create
  fully-configurable clusters.
- A user who has both cluster create permission and access to cluster policies can select
  the Unrestricted policy and policies they have access to.
- A user who has access only to cluster policies can select the policies they have access to.

If no policies have been created in the workspace, the Policy drop-down does not display.

Only admin users can create, edit, and delete policies.
Admin users also have access to all policies.

### `databricks cluster-policies create` - Create a new policy.

Creates a new policy with prescribed settings.

Flags:
 * `--definition` - Policy definition document expressed in Databricks Cluster Policy Definition Language.
 * `--description` - Additional human-readable description of the cluster policy.
 * `--max-clusters-per-user` - Max number of clusters per user that can be active using this policy.
 * `--policy-family-definition-overrides` - Policy definition JSON document expressed in Databricks Policy Definition Language.
 * `--policy-family-id` - ID of the policy family.

### `databricks cluster-policies delete` - Delete a cluster policy.

Delete a policy for a cluster. Clusters governed by this policy can still run, but cannot be edited.

### `databricks cluster-policies edit` - Update a cluster policy.

Update an existing policy for a cluster. This operation may make some clusters governed by the previous policy invalid.

Flags:
 * `--definition` - Policy definition document expressed in Databricks Cluster Policy Definition Language.
 * `--description` - Additional human-readable description of the cluster policy.
 * `--max-clusters-per-user` - Max number of clusters per user that can be active using this policy.
 * `--policy-family-definition-overrides` - Policy definition JSON document expressed in Databricks Policy Definition Language.
 * `--policy-family-id` - ID of the policy family.

### `databricks cluster-policies get` - Get entity.

Get a cluster policy entity. Creation and editing is available to admins only.

### `databricks cluster-policies list` - Get a cluster policy.

Returns a list of policies accessible by the requesting user.

Flags:
 * `--sort-column` - The cluster policy attribute to sort by.
 * `--sort-order` - The order in which the policies get listed.
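
For illustration, a policy can be created from an inline definition document; the policy name, the definition, and the positional-argument form are assumptions of this sketch rather than the only valid invocation:

```
# Create a policy that fixes the autotermination timeout and caps clusters per user.
# "small-adhoc" and the definition document are illustrative only.
databricks cluster-policies create small-adhoc \
  --description "Small ad-hoc clusters only" \
  --max-clusters-per-user 2 \
  --definition '{"autotermination_minutes": {"type": "fixed", "value": 30}}'

# List the policies accessible to the requesting user.
databricks cluster-policies list
```
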
## `databricks clusters` - The Clusters API allows you to create, start, edit, list, terminate, and delete clusters.

Databricks maps cluster node instance types to compute units known as DBUs. See the instance
type pricing page for a list of the supported instance types and their corresponding DBUs.

A Databricks cluster is a set of computation resources and configurations on which you run
data engineering, data science, and data analytics workloads, such as production
ETL pipelines, streaming analytics, ad-hoc analytics, and machine learning.

You run these workloads as a set of commands in a notebook or as an automated job.
Databricks makes a distinction between all-purpose clusters and job clusters. You use
all-purpose clusters to analyze data collaboratively using interactive notebooks. You use
job clusters to run fast and robust automated jobs.

You can create an all-purpose cluster using the UI, CLI, or commands. You can manually
terminate and restart an all-purpose cluster. Multiple users can share such clusters to do
collaborative interactive analysis.

IMPORTANT: Databricks retains cluster configuration information for up to 200 all-purpose
clusters terminated in the last 30 days and up to 30 job clusters recently terminated by
the job scheduler. To keep an all-purpose cluster configuration even after it has been
terminated for more than 30 days, an administrator can pin a cluster to the cluster list.

### `databricks clusters change-owner` - Change cluster owner.

Change the owner of the cluster. You must be an admin to perform this operation.

### `databricks clusters create` - Create new cluster.

Creates a new Spark cluster. This method will acquire new instances from the cloud provider if necessary.
This method is asynchronous; the returned `cluster_id` can be used to poll the cluster status.
When this method returns, the cluster will be in a `PENDING` state.
The cluster will be usable once it enters a `RUNNING` state.

Note: Databricks may not be able to acquire some of the requested nodes, due to cloud provider limitations
(account limits, spot price, etc.) or transient network issues.

If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed.
Otherwise the cluster will terminate with an informative error message.

Flags:
 * `--no-wait` - do not wait to reach RUNNING state.
 * `--timeout` - maximum amount of time to reach RUNNING state.
 * `--json` - either inline JSON string or @path/to/file.json with request body
 * `--apply-policy-default-values` - Note: This field won't be true for webapp requests.
 * `--autotermination-minutes` - Automatically terminates the cluster after it is inactive for this time in minutes.
 * `--cluster-name` - Cluster name requested by the user.
 * `--cluster-source` - Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.
 * `--driver-instance-pool-id` - The optional ID of the instance pool to which the driver of the cluster belongs.
 * `--driver-node-type-id` - The node type of the Spark driver.
 * `--enable-elastic-disk` - Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.
 * `--enable-local-disk-encryption` - Whether to enable LUKS on cluster VMs' local disks.
 * `--instance-pool-id` - The optional ID of the instance pool to which the cluster belongs.
 * `--node-type-id` - This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.
 * `--num-workers` - Number of worker nodes that this cluster should have.
 * `--policy-id` - The ID of the cluster policy used to create the cluster if applicable.
 * `--runtime-engine` - Decides which runtime engine to use.
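
A hedged sketch of creating a small all-purpose cluster from an inline JSON request body; the Spark version and node type are placeholders that depend on your cloud and workspace:

```
# Create a one-worker cluster and wait up to 20 minutes for it to reach RUNNING.
# spark_version and node_type_id are placeholders for your workspace.
databricks clusters create --timeout 20m --json '{
  "cluster_name": "docs-example",
  "spark_version": "13.3.x-scala2.12",
  "node_type_id": "i3.xlarge",
  "num_workers": 1,
  "autotermination_minutes": 60
}'
```
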
### `databricks clusters delete` - Terminate cluster.

Terminates the Spark cluster with the specified ID. The cluster is removed asynchronously.
Once the termination has completed, the cluster will be in a `TERMINATED` state.
If the cluster is already in a `TERMINATING` or `TERMINATED` state, nothing will happen.

Flags:
 * `--no-wait` - do not wait to reach TERMINATED state.
 * `--timeout` - maximum amount of time to reach TERMINATED state.

### `databricks clusters edit` - Update cluster configuration.

Updates the configuration of a cluster to match the provided attributes and size.
A cluster can be updated if it is in a `RUNNING` or `TERMINATED` state.

If a cluster is updated while in a `RUNNING` state, it will be restarted so that the new attributes can take effect.

If a cluster is updated while in a `TERMINATED` state, it will remain `TERMINATED`.
The next time it is started using the `clusters/start` API, the new attributes will take effect.
Any attempt to update a cluster in any other state will be rejected with an `INVALID_STATE` error code.

Clusters created by the Databricks Jobs service cannot be edited.

Flags:
 * `--no-wait` - do not wait to reach RUNNING state.
 * `--timeout` - maximum amount of time to reach RUNNING state.
 * `--json` - either inline JSON string or @path/to/file.json with request body
 * `--apply-policy-default-values` - Note: This field won't be true for webapp requests.
 * `--autotermination-minutes` - Automatically terminates the cluster after it is inactive for this time in minutes.
 * `--cluster-name` - Cluster name requested by the user.
 * `--cluster-source` - Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.
 * `--driver-instance-pool-id` - The optional ID of the instance pool to which the driver of the cluster belongs.
 * `--driver-node-type-id` - The node type of the Spark driver.
 * `--enable-elastic-disk` - Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.
 * `--enable-local-disk-encryption` - Whether to enable LUKS on cluster VMs' local disks.
 * `--instance-pool-id` - The optional ID of the instance pool to which the cluster belongs.
 * `--node-type-id` - This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.
 * `--num-workers` - Number of worker nodes that this cluster should have.
 * `--policy-id` - The ID of the cluster policy used to create the cluster if applicable.
 * `--runtime-engine` - Decides which runtime engine to use.

### `databricks clusters events` - List cluster activity events.

Retrieves a list of events about the activity of a cluster.
The command is paginated. If there are more events to read, the response includes all the parameters necessary to request
the next page of events.

Flags:
 * `--json` - either inline JSON string or @path/to/file.json with request body
 * `--end-time` - The end time in epoch milliseconds.
 * `--limit` - The maximum number of events to include in a page of events.
 * `--offset` - The offset in the result set.
 * `--order` - The order to list events in; either "ASC" or "DESC".
 * `--start-time` - The start time in epoch milliseconds.
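
For example, recent activity for a cluster can be paged through with the flags above; the cluster ID is a placeholder, and passing it as the positional argument is an assumption of this sketch:

```
# Show the 10 most recent events for a cluster, newest first.
databricks clusters events 0123-456789-abcdefgh --limit 10 --order DESC
```
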
-
-Retrieves the information for a cluster given its identifier.
-Clusters can be described while they are running, or up to 60 days after they are terminated.
-
-Flags:
 - * `--no-wait` - do not wait to reach RUNNING state.
 - * `--timeout` - maximum amount of time to reach RUNNING state.
-
-### `databricks clusters list` - List all clusters.
-
-Return information about all pinned clusters, active clusters, up to 200 of the most recently terminated all-purpose clusters in
-the past 30 days, and up to 30 of the most recently terminated job clusters in the past 30 days.
-
-For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated all-purpose clusters in the past 30 days,
-and 50 terminated job clusters in the past 30 days, then the command returns the 1 pinned cluster, 4 active clusters,
-all 45 terminated all-purpose clusters, and the 30 most recently terminated job clusters.
-
-Flags:
 - * `--can-use-client` - Filter clusters based on what type of client it can be used for.
-
-### `databricks clusters list-node-types` - List node types.
-
-Returns a list of supported Spark node types. These node types can be used to launch a cluster.
-
-### `databricks clusters list-zones` - List availability zones.
-
-Returns a list of availability zones where clusters can be created (for example, us-west-2a).
-These zones can be used to launch a cluster.
-
-### `databricks clusters permanent-delete` - Permanently delete cluster.
-
-Permanently deletes a Spark cluster. This cluster is terminated and resources are asynchronously removed.
-
-In addition, users will no longer see permanently deleted clusters in the cluster list, and API users can no longer
-perform any action on permanently deleted clusters.
-
-### `databricks clusters pin` - Pin cluster.
-
-Pinning a cluster ensures that the cluster will always be returned by the ListClusters API.
-Pinning a cluster that is already pinned will have no effect.
-This command can only be called by workspace admins.
-
-### `databricks clusters resize` - Resize cluster.
-
-Resizes a cluster to have a desired number of workers. This will fail unless the cluster is in a `RUNNING` state.
-
-Flags:
 - * `--no-wait` - do not wait to reach RUNNING state.
 - * `--timeout` - maximum amount of time to reach RUNNING state.
 - * `--json` - either inline JSON string or @path/to/file.json with request body
 - * `--num-workers` - Number of worker nodes that this cluster should have.
-
-### `databricks clusters restart` - Restart cluster.
-
-Restarts a Spark cluster with the supplied ID. If the cluster is not currently in a `RUNNING` state, nothing will happen.
-
-Flags:
 - * `--no-wait` - do not wait to reach RUNNING state.
 - * `--timeout` - maximum amount of time to reach RUNNING state.
-
-### `databricks clusters spark-versions` - List available Spark versions.
-
-Returns the list of available Spark versions. These versions can be used to launch a cluster.
-
-### `databricks clusters start` - Start terminated cluster.
-
-Starts a terminated Spark cluster with the supplied ID.
-This works similarly to `createCluster` except:
-
-* The previous cluster id and attributes are preserved.
-* The cluster starts with the last specified cluster size.
-* If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes.
-* If the cluster is not currently in a `TERMINATED` state, nothing will happen.
-* Clusters launched to run a job cannot be started.
-
-Flags:
 - * `--no-wait` - do not wait to reach RUNNING state.
- * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks clusters unpin` - Unpin cluster. - -Unpinning a cluster will allow the cluster to eventually be removed from the ListClusters API. -Unpinning a cluster that is not pinned will have no effect. -command can only be called by workspace admins. - -## `databricks account credentials` - These commands manage credential configurations for this workspace. - -Databricks needs access to a cross-account service IAM role in your AWS account so that Databricks can deploy clusters -in the appropriate VPC for the new workspace. A credential configuration encapsulates this -role information, and its ID is used when creating a new workspace. - -### `databricks account credentials create` - Create credential configuration. - -Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks clusters. For your AWS IAM role, you need to trust the External ID (the Databricks Account API account ID) in the returned credential object, and configure the required access policy. - -Save the response's `credentials_id` field, which is the ID for your new credential configuration object. - -For information about how to create a new workspace with command, see [Create a new workspace using the Account API](http://docs.databricks.com/administration-guide/account-api/new-workspace.html) - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account credentials delete` - Delete credential configuration. - -Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace. - -### `databricks account credentials get` - Get credential configuration. - -Gets a Databricks credential configuration object for an account, both specified by ID. - -### `databricks account credentials list` - Get all credential configurations. - -Gets all Databricks credential configurations associated with an account specified by ID. - -## `databricks current-user` - command allows retrieving information about currently authenticated user or service principal. - -**NOTE** **this command may change** - -command allows retrieving information about currently authenticated user or -service principal. - -### `databricks current-user me` - Get current user info. - -Get details about the current method caller's identity. - -## `databricks account custom-app-integration` - manage custom oauth app integrations. - -These commands enable administrators to manage custom oauth app integrations, which is required for -adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. - -**Note:** You can only add/use the OAuth custom application integrations when OAuth enrollment -status is enabled. - -### `databricks account custom-app-integration create` - Create Custom OAuth App Integration. - -Create Custom OAuth App Integration. - -You can retrieve the custom oauth app integration via :method:get. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--confidential` - indicates if an oauth client-secret should be generated. - -### `databricks account custom-app-integration delete` - Delete Custom OAuth App Integration. - -Delete an existing Custom OAuth App Integration. 
-You can retrieve the custom oauth app integration via :method:get. - -### `databricks account custom-app-integration get` - Get OAuth Custom App Integration. - -Gets the Custom OAuth App Integration for the given integration id. - -### `databricks account custom-app-integration list` - Get custom oauth app integrations. - -Get the list of custom oauth app integrations for the specified Databricks Account - -### `databricks account custom-app-integration update` - Updates Custom OAuth App Integration. - -Updates an existing custom OAuth App Integration. -You can retrieve the custom oauth app integration via :method:get. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks dashboards` - Databricks SQL Dashboards - -Manage SQL Dashboards from CLI. - -### `databricks dashboards create` - Create a dashboard object. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--dashboard-filters-enabled` - In the web application, query filters that share a name are coupled to a single selection box if this value is true. - * `--is-draft` - Draft dashboards only appear in list views for their owners. - * `--is-trashed` - Indicates whether the dashboard is trashed. - * `--name` - The title of this dashboard that appears in list views and at the top of the dashboard page. - * `--parent` - The identifier of the workspace folder containing the dashboard. - -### `databricks dashboards delete` - Remove a dashboard. - -Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared. - -### `databricks dashboards get` - Retrieve a definition. - -Returns a JSON representation of a dashboard object, including its visualization and query objects. - -### `databricks dashboards list` - Get dashboard objects. - -Fetch a paginated list of dashboard objects. - -Flags: - * `--order` - Name of dashboard attribute to order by. - * `--page` - Page number to retrieve. - * `--page-size` - Number of dashboards to return per page. - * `--q` - Full text search term. - -### `databricks dashboards restore` - Restore a dashboard. - -A restored dashboard appears in list views and searches and can be shared. - -## `databricks data-sources` - command is provided to assist you in making new query objects. - -command is provided to assist you in making new query objects. When creating a query object, -you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. -If you don't already know the `data_source_id` for your desired SQL warehouse, command will -help you find it. - -command does not support searches. It returns the full list of SQL warehouses in your -workspace. We advise you to use any text editor, REST client, or `grep` to search the -response from command for the name of your SQL warehouse as it appears in Databricks SQL. - -### `databricks data-sources list` - Get a list of SQL warehouses. - -Retrieves a full list of SQL warehouses available in this workspace. -All fields that appear in command response are enumerated for clarity. -However, you need only a SQL warehouse's `id` to create new queries against it. - -## `databricks account encryption-keys` - manage encryption key configurations. - -These commands manage encryption key configurations for this workspace (optional). A key -configuration encapsulates the AWS KMS key information and some information about how -the key configuration can be used. 
There are two possible uses for key configurations:
-
-* Managed services: A key configuration can be used to encrypt a workspace's notebook and
-secret data in the control plane, as well as Databricks SQL queries and query history.
-* Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in
-the data plane.
-
-In both of these cases, the key configuration's ID is used when creating a new workspace.
-This Preview feature is available if your account is on the E2 version of the platform.
-Updating a running workspace with workspace storage encryption requires that the workspace
-is on the E2 version of the platform. If you have an older workspace, it might not be on
-the E2 version of the platform. If you are not sure, contact your Databricks representative.
-
-### `databricks account encryption-keys create` - Create encryption key configuration.
-
-Creates a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If the key is assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for workspace storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.
-
-**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.
-
-This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.
-
-Flags:
 - * `--json` - either inline JSON string or @path/to/file.json with request body
-
-### `databricks account encryption-keys delete` - Delete encryption key configuration.
-
-Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace.
-
-### `databricks account encryption-keys get` - Get encryption key configuration.
-
-Gets a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.
-
-**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.
-
-This operation is available only if your account is on the E2 version of the platform.
-
-### `databricks account encryption-keys list` - Get all encryption key configurations.
-
-Gets all customer-managed key configuration objects for an account. If the key is specified as a workspace's managed services customer-managed key, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history.
If the key is specified as a workspace's storage customer-managed key, the key is used to encrypt the workspace's root S3 bucket and optionally can encrypt cluster EBS volume data in the data plane.
-
-**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.
-
-This operation is available only if your account is on the E2 version of the platform.
-
-## `databricks experiments` - Manage MLflow experiments
-
-### `databricks experiments create-experiment` - Create experiment.
-
-Creates an experiment with a name. Returns the ID of the newly created experiment.
-Validates that another experiment with the same name does not already exist and fails
-if another experiment with the same name already exists.
-
-Throws `RESOURCE_ALREADY_EXISTS` if an experiment with the given name exists.
-
-Flags:
 - * `--json` - either inline JSON string or @path/to/file.json with request body
 - * `--artifact-location` - Location where all artifacts for the experiment are stored.
-
-### `databricks experiments create-run` - Create a run.
-
-Creates a new run within an experiment.
-A run is usually a single execution of a machine learning or data ETL pipeline.
-MLflow uses runs to track the `mlflowParam`, `mlflowMetric` and `mlflowRunTag` associated with a single execution.
-
-Flags:
 - * `--json` - either inline JSON string or @path/to/file.json with request body
 - * `--experiment-id` - ID of the associated experiment.
 - * `--start-time` - Unix timestamp in milliseconds of when the run started.
 - * `--user-id` - ID of the user executing the run.
-
-### `databricks experiments delete-experiment` - Delete an experiment.
-
-Marks an experiment and associated metadata, runs, metrics, params, and tags for deletion.
-If the experiment uses FileStore, artifacts associated with the experiment are also deleted.
-
-### `databricks experiments delete-run` - Delete a run.
-
-Marks a run for deletion.
-
-### `databricks experiments delete-tag` - Delete a tag.
-
-Deletes a tag on a run. Tags are run metadata that can be updated during a run and after a run completes.
-
-### `databricks experiments get-by-name` - Get metadata.
-
-Gets metadata for an experiment.
-
-This endpoint will return deleted experiments, but prefers the active experiment if an active and deleted experiment
-share the same name. If multiple deleted experiments share the same name, the API will return one of them.
-
-Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name exists.
-
-### `databricks experiments get-experiment` - Get an experiment.
-
-Gets metadata for an experiment. This method works on deleted experiments.
-
-### `databricks experiments get-history` - Get history of a given metric within a run.
-
-Gets a list of all values for the specified metric for a given run.
-
-Flags:
 - * `--max-results` - Maximum number of Metric records to return per paginated request.
 - * `--run-id` - ID of the run from which to fetch metric values.
 - * `--run-uuid` - [Deprecated, use run_id instead] ID of the run from which to fetch metric values.
-
-### `databricks experiments get-run` - Get a run.
-
-Gets the metadata, metrics, params, and tags for a run.
-In the case where multiple metrics with the same key are logged for a run, return only the value
-with the latest timestamp.
-
-If there are multiple values with the latest timestamp, return the maximum of these values.
-
-Flags:
 - * `--run-uuid` - [Deprecated, use run_id instead] ID of the run to fetch.
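-
-As a rough illustration only (the run ID below is a placeholder, and the exact argument shape may differ between CLI versions), fetching a single run typically looks like:
-
-```
-databricks experiments get-run 7f0d1c2b3a4e5f60
-```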
-
-### `databricks experiments list-artifacts` - Get all artifacts.
-
-List artifacts for a run. Takes an optional `artifact_path` prefix. If it is specified, the response contains only artifacts with the specified prefix.
-
-Flags:
 - * `--path` - Filter artifacts matching this path (a relative path from the root artifact directory).
 - * `--run-id` - ID of the run whose artifacts to list.
 - * `--run-uuid` - [Deprecated, use run_id instead] ID of the run whose artifacts to list.
-
-### `databricks experiments list-experiments` - List experiments.
-
-Gets a list of all experiments.
-
-Flags:
 - * `--max-results` - Maximum number of experiments desired.
 - * `--view-type` - Qualifier for type of experiments to be returned.
-
-### `databricks experiments log-batch` - Log a batch.
-
-Logs a batch of metrics, params, and tags for a run.
-If any data failed to be persisted, the server will respond with an error (non-200 status code).
-
-In case of error (due to internal server error or an invalid request), partial data may be written.
-
-You can write metrics, params, and tags in interleaving fashion, but within a given entity type they are guaranteed to follow
-the order specified in the request body.
-
-The overwrite behavior for metrics, params, and tags is as follows:
-
-* Metrics: metric values are never overwritten.
 - Logging a metric (key, value, timestamp) appends to the set of values for the metric with the provided key.
-
-* Tags: tag values can be overwritten by successive writes to the same tag key.
 - That is, if multiple tag values with the same key are provided in the same API request,
 - the last-provided tag value is written. Logging the same tag (key, value) is permitted. Specifically, logging a tag is idempotent.
-
-* Parameters: once written, param values cannot be changed (attempting to overwrite a param value will result in an error).
 - However, logging the same param (key, value) is permitted. Specifically, logging a param is idempotent.
-
 - Request Limits
 - -------------------------------
 - A single JSON-serialized API request may be up to 1 MB in size and contain:
-
 - * No more than 1000 metrics, params, and tags in total
 - * Up to 1000 metrics
 - * Up to 100 params
 - * Up to 100 tags
-
 - For example, a valid request might contain 900 metrics, 50 params, and 50 tags, but logging 900 metrics, 50 params,
 - and 51 tags is invalid.
-
 - The following limits also apply to metric, param, and tag keys and values:
-
 - * Metric keys, param keys, and tag keys can be up to 250 characters in length
 - * Parameter and tag values can be up to 250 characters in length
-
-
-Flags:
 - * `--json` - either inline JSON string or @path/to/file.json with request body
 - * `--run-id` - ID of the run to log under.
-
-### `databricks experiments log-metric` - Log a metric.
-
-Logs a metric for a run. A metric is a key-value pair (string key, float value) with an associated timestamp.
-Examples include the various metrics that represent ML model accuracy. A metric can be logged multiple times.
-
-Flags:
 - * `--run-id` - ID of the run under which to log the metric.
 - * `--run-uuid` - [Deprecated, use run_id instead] ID of the run under which to log the metric.
 - * `--step` - Step at which to log the metric.
-
-### `databricks experiments log-model` - Log a model.
-
-**NOTE:** Experimental: this command may change or be removed in a future release without warning.
-
-Flags:
 - * `--model-json` - MLmodel file in json format.
 - * `--run-id` - ID of the run to log under.
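-
-As a hedged sketch (the run ID and file path below are placeholders, not values taken from this reference), logging a model description with the flags above might look like:
-
-```
-databricks experiments log-model --run-id 7f0d1c2b3a4e5f60 --model-json @MLmodel.json
-```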
-
-### `databricks experiments log-param` - Log a param.
-
-Logs a param used for a run. A param is a key-value pair (string key, string value).
-Examples include hyperparameters used for ML model training and constant dates and values used in an ETL pipeline.
-A param can be logged only once for a run.
-
-Flags:
 - * `--run-id` - ID of the run under which to log the param.
 - * `--run-uuid` - [Deprecated, use run_id instead] ID of the run under which to log the param.
-
-### `databricks experiments restore-experiment` - Restores an experiment.
-
-Restore an experiment marked for deletion. This also restores associated metadata, runs, metrics, params, and tags. If the experiment uses FileStore, underlying artifacts associated with the experiment are also restored. Throws `RESOURCE_DOES_NOT_EXIST` if the experiment was never created or was permanently deleted.
-
-### `databricks experiments restore-run` - Restore a run.
-
-Restores a deleted run.
-
-### `databricks experiments search-experiments` - Search experiments.
-
-Searches for experiments that satisfy specified search criteria.
-
-Flags:
 - * `--json` - either inline JSON string or @path/to/file.json with request body
 - * `--filter` - String representing a SQL filter condition (e.g.
 - * `--max-results` - Maximum number of experiments desired.
 - * `--view-type` - Qualifier for type of experiments to be returned.
-
-### `databricks experiments search-runs` - Search for runs.
-
-Searches for runs that satisfy expressions.
-
-Search expressions can use `mlflowMetric` and `mlflowParam` keys.
-
-Flags:
 - * `--json` - either inline JSON string or @path/to/file.json with request body
 - * `--filter` - A filter expression over params, metrics, and tags, that allows returning a subset of runs.
 - * `--max-results` - Maximum number of runs desired.
 - * `--run-view-type` - Whether to display only active, only deleted, or all runs.
-
-### `databricks experiments set-experiment-tag` - Set a tag.
-
-Sets a tag on an experiment. Experiment tags are metadata that can be updated.
-
-### `databricks experiments set-tag` - Set a tag.
-
-Sets a tag on a run. Tags are run metadata that can be updated during a run and after
-a run completes.
-Flags:
 - * `--run-id` - ID of the run under which to log the tag.
 - * `--run-uuid` - [Deprecated, use run_id instead] ID of the run under which to log the tag.
-
-### `databricks experiments update-experiment` - Update an experiment.
-
-Updates experiment metadata.
-Flags:
 - * `--new-name` - If provided, the experiment's name is changed to the new name.
-
-### `databricks experiments update-run` - Update a run.
-
-Updates run metadata.
-Flags:
 - * `--end-time` - Unix timestamp in milliseconds of when the run ended.
 - * `--run-id` - ID of the run to update.
 - * `--run-uuid` - [Deprecated, use run_id instead] ID of the run to update.
 - * `--status` - Updated status of the run.
-
-## `databricks external-locations` - manage a cloud storage path with a storage credential that authorizes access to it.
-
-An external location is an object that combines a cloud storage path with a storage
-credential that authorizes access to the cloud storage path. Each external location is
-subject to Unity Catalog access-control policies that control which users and groups can
-access the credential. If a user does not have access to an external location in Unity
-Catalog, the request fails and Unity Catalog does not attempt to authenticate to your cloud
-tenant on the user’s behalf.
- -Databricks recommends using external locations rather than using storage credentials -directly. - -To create external locations, you must be a metastore admin or a user with the -**CREATE_EXTERNAL_LOCATION** privilege. - -### `databricks external-locations create` - Create an external location. - -Creates a new external location entry in the metastore. -The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the metastore and the associated storage credential. - -Flags: - * `--comment` - User-provided free-form text description. - * `--read-only` - Indicates whether the external location is read-only. - * `--skip-validation` - Skips validation of the storage credential associated with the external location. - -### `databricks external-locations delete` - Delete an external location. - -Deletes the specified external location from the metastore. The caller must be the owner of the external location. - -Flags: - * `--force` - Force deletion even if there are dependent external tables or mounts. - -### `databricks external-locations get` - Get an external location. - -Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some privilege on the external location. - -### `databricks external-locations list` - List external locations. - -Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. -The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on the external location. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks external-locations update` - Update an external location. - -Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. -In the second case, the admin can only update the name of the external location. - -Flags: - * `--comment` - User-provided free-form text description. - * `--credential-name` - Name of the storage credential used with this location. - * `--force` - Force update even if changing url invalidates dependent external tables or mounts. - * `--name` - Name of the external location. - * `--owner` - The owner of the external location. - * `--read-only` - Indicates whether the external location is read-only. - * `--url` - Path URL of the external location. - -## `databricks functions` - Functions implement User-Defined Functions (UDFs) in Unity Catalog. - -The function implementation can be any SQL expression or Query, and it can be invoked wherever a table reference is allowed in a query. -In Unity Catalog, a function resides at the same level as a table, so it can be referenced with the form __catalog_name__.__schema_name__.__function_name__. - -### `databricks functions create` - Create a function. - -Creates a new function - -The user must have the following permissions in order for the function to be created: -- **USE_CATALOG** on the function's parent catalog -- **USE_SCHEMA** and **CREATE_FUNCTION** on the function's parent schema - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body -* `--comment` - User-provided free-form text description. - * `--external-language` - External function language. - * `--external-name` - External function name. - * `--sql-path` - List of schemes whose objects can be referenced without qualification. - -### `databricks functions delete` - Delete a function. 
-
-Deletes the function that matches the supplied name.
-For the deletion to succeed, the user must satisfy one of the following conditions:
-- Is the owner of the function's parent catalog
-- Is the owner of the function's parent schema and have the **USE_CATALOG** privilege on its parent catalog
-- Is the owner of the function itself and have both the **USE_CATALOG** privilege on its parent catalog and the **USE_SCHEMA** privilege on its parent schema
-
-Flags:
 - * `--force` - Force deletion even if the function is not empty.
-
-### `databricks functions get` - Get a function.
-
-Gets a function from within a parent catalog and schema.
-For the fetch to succeed, the user must satisfy one of the following requirements:
-- Is a metastore admin
-- Is an owner of the function's parent catalog
-- Have the **USE_CATALOG** privilege on the function's parent catalog and be the owner of the function
-- Have the **USE_CATALOG** privilege on the function's parent catalog, the **USE_SCHEMA** privilege on the function's parent schema, and the **EXECUTE** privilege on the function itself
-
-### `databricks functions list` - List functions.
-
-List functions within the specified parent catalog and schema.
-If the user is a metastore admin, all functions are returned in the output list.
-Otherwise, the user must have the **USE_CATALOG** privilege on the catalog and the **USE_SCHEMA** privilege on the schema, and the output list contains only functions for which either the user has the **EXECUTE** privilege or the user is the owner.
-There is no guarantee of a specific ordering of the elements in the array.
-
-### `databricks functions update` - Update a function.
-
-Updates the function that matches the supplied name.
-Only the owner of the function can be updated. If the user is not a metastore admin, the user must be a member of the group that is the new function owner.
-For the update to succeed, the user must satisfy one of the following conditions:
-- Is a metastore admin
-- Is the owner of the function's parent catalog
-- Is the owner of the function's parent schema and has the **USE_CATALOG** privilege on its parent catalog
-- Is the owner of the function itself and has the **USE_CATALOG** privilege on its parent catalog as well as the **USE_SCHEMA** privilege on the function's parent schema.
-
-Flags:
 - * `--owner` - Username of current owner of function.
-
-## `databricks git-credentials` - Registers a personal access token for Databricks to do operations on behalf of the user.
-
-See [more info](https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html).
-
-### `databricks git-credentials create` - Create a credential entry.
-
-Creates a Git credential entry for the user. Only one Git credential per user is
-supported, so any attempts to create credentials if an entry already exists will
-fail. Use the PATCH endpoint to update existing credentials, or the DELETE endpoint to
-delete existing credentials.
-
-Flags:
 - * `--git-username` - Git username.
 - * `--personal-access-token` - The personal access token used to authenticate to the corresponding Git provider.
-
-### `databricks git-credentials delete` - Delete a credential.
-
-Deletes the specified Git credential.
-
-### `databricks git-credentials get` - Get a credential entry.
-
-Gets the Git credential with the specified credential ID.
-
-### `databricks git-credentials list` - Get Git credentials.
-
-Lists the calling user's Git credentials. One credential per user is supported.
-
-### `databricks git-credentials update` - Update a credential.
-
-Updates the specified Git credential.
-Flags: - * `--git-provider` - Git provider. - * `--git-username` - Git username. - * `--personal-access-token` - The personal access token used to authenticate to the corresponding Git provider. - -## `databricks global-init-scripts` - configure global initialization scripts for the workspace. - -The Global Init Scripts API enables Workspace administrators to configure global -initialization scripts for their workspace. These scripts run on every node in every cluster -in the workspace. - -**Important:** Existing clusters must be restarted to pick up any changes made to global -init scripts. -Global init scripts are run in order. If the init script returns with a bad exit code, -the Apache Spark container fails to launch and init scripts with later position are skipped. -If enough containers fail, the entire cluster fails with a `GLOBAL_INIT_SCRIPT_FAILURE` -error code. - -### `databricks global-init-scripts create` - Create init script. - -Creates a new global init script in this workspace. -Flags: - * `--enabled` - Specifies whether the script is enabled. - * `--position` - The position of a global init script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. - -### `databricks global-init-scripts delete` - Delete init script. - -Deletes a global init script. - -### `databricks global-init-scripts get` - Get an init script. - -Gets all the details of a script, including its Base64-encoded contents. - -### `databricks global-init-scripts list` - Get init scripts. - -Get a list of all global init scripts for this workspace. This returns all properties for each script but **not** the script contents. -To retrieve the contents of a script, use the [get a global init script](#operation/get-script) operation. - -### `databricks global-init-scripts update` - Update init script. - -Updates a global init script, specifying only the fields to change. All fields are optional. -Unspecified fields retain their current value. - -Flags: - * `--enabled` - Specifies whether the script is enabled. - * `--position` - The position of a script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. - -## `databricks grants` - Manage data access in Unity Catalog. - -In Unity Catalog, data is secure by default. Initially, users have no access to data in -a metastore. Access can be granted by either a metastore admin, the owner of an object, or -the owner of the catalog or schema that contains the object. Securable objects in Unity -Catalog are hierarchical and privileges are inherited downward. - -Securable objects in Unity Catalog are hierarchical and privileges are inherited downward. -This means that granting a privilege on the catalog automatically grants the privilege to -all current and future objects within the catalog. Similarly, privileges granted on a schema -are inherited by all current and future objects within that schema. - -### `databricks grants get` - Get permissions. - -Gets the permissions for a securable. - -Flags: - * `--principal` - If provided, only the permissions for the specified principal (user or group) are returned. - -### `databricks grants get-effective` - Get effective permissions. - -Gets the effective permissions for a securable. -Flags: - * `--principal` - If provided, only the effective permissions for the specified principal (user or group) are returned. - -### `databricks grants update` - Update permissions. - -Updates the permissions for a securable. 
- -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks groups` - Groups for identity management. - -Groups simplify identity management, making it easier to assign access to Databricks Workspace, data, -and other securable objects. - -It is best practice to assign access to workspaces and access-control policies in -Unity Catalog to groups, instead of to users individually. All Databricks Workspace identities can be -assigned as members of groups, and members inherit permissions that are assigned to their -group. - -### `databricks groups create` - Create a new group. - -Creates a group in the Databricks Workspace with a unique name, using the supplied group details. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--display-name` - String that represents a human-readable group name. - * `--external-id` - - * `--id` - Databricks group ID. - -### `databricks groups delete` - Delete a group. - -Deletes a group from the Databricks Workspace. - -### `databricks groups get` - Get group details. - -Gets the information for a specific group in the Databricks Workspace. - -### `databricks groups list` - List group details. - -Gets all details of the groups associated with the Databricks Workspace. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks groups patch` - Update group details. - -Partially updates the details of a group. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks groups update` - Replace a group. - -Updates the details of a group by replacing the entire group entity. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--display-name` - String that represents a human-readable group name. - * `--external-id` - - * `--id` - Databricks group ID. - -## `databricks account groups` - Account-level group management - -Groups simplify identity management, making it easier to assign access to Databricks Account, data, -and other securable objects. - -It is best practice to assign access to workspaces and access-control policies in -Unity Catalog to groups, instead of to users individually. All Databricks Account identities can be -assigned as members of groups, and members inherit permissions that are assigned to their -group. - -### `databricks account groups create` - Create a new group. - -Creates a group in the Databricks Account with a unique name, using the supplied group details. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--display-name` - String that represents a human-readable group name. - * `--external-id` - - * `--id` - Databricks group ID. - -### `databricks account groups delete` - Delete a group. - -Deletes a group from the Databricks Account. - -### `databricks account groups get` - Get group details. - -Gets the information for a specific group in the Databricks Account. - -### `databricks account groups list` - List group details. - -Gets all details of the groups associated with the Databricks Account. 
- -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks account groups patch` - Update group details. - -Partially updates the details of a group. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account groups update` - Replace a group. - -Updates the details of a group by replacing the entire group entity. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--display-name` - String that represents a human-readable group name. - * `--external-id` - - * `--id` - Databricks group ID. - -## `databricks instance-pools` - manage ready-to-use cloud instances which reduces a cluster start and auto-scaling times. - -Instance Pools API are used to create, edit, delete and list instance pools by using -ready-to-use cloud instances which reduces a cluster start and auto-scaling times. - -Databricks pools reduce cluster start and auto-scaling times by maintaining a set of idle, -ready-to-use instances. When a cluster is attached to a pool, cluster nodes are created using -the pool’s idle instances. If the pool has no idle instances, the pool expands by allocating -a new instance from the instance provider in order to accommodate the cluster’s request. -When a cluster releases an instance, it returns to the pool and is free for another cluster -to use. Only clusters attached to a pool can use that pool’s idle instances. - -You can specify a different pool for the driver node and worker nodes, or use the same pool -for both. - -Databricks does not charge DBUs while instances are idle in the pool. Instance provider -billing does apply. See pricing. - -### `databricks instance-pools create` - Create a new instance pool. - - -Creates a new instance pool using idle and ready-to-use cloud instances. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--enable-elastic-disk` - Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space. - * `--idle-instance-autotermination-minutes` - Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. - * `--max-capacity` - Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances. - * `--min-idle-instances` - Minimum number of idle instances to keep in the instance pool. - -### `databricks instance-pools delete` - Delete an instance pool. - -Deletes the instance pool permanently. The idle instances in the pool are terminated asynchronously. - -### `databricks instance-pools edit` - Edit an existing instance pool. - -Modifies the configuration of an existing instance pool. 
- -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--enable-elastic-disk` - Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space. - * `--idle-instance-autotermination-minutes` - Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. - * `--max-capacity` - Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances. - * `--min-idle-instances` - Minimum number of idle instances to keep in the instance pool. - -### `databricks instance-pools get` - Get instance pool information. - -Retrieve the information for an instance pool based on its identifier. - -### `databricks instance-pools list` - List instance pool info. - -Gets a list of instance pools with their statistics. - -## `databricks instance-profiles` - Manage instance profiles that users can launch clusters with. - -The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch -clusters with. Regular users can list the instance profiles available to them. -See [Secure access to S3 buckets](https://docs.databricks.com/administration-guide/cloud-configurations/aws/instance-profiles.html) using -instance profiles for more information. - -### `databricks instance-profiles add` - Register an instance profile. - -In the UI, you can select the instance profile when launching clusters. command is only available to admin users. - -Flags: - * `--iam-role-arn` - The AWS IAM role ARN of the role associated with the instance profile. - * `--is-meta-instance-profile` - By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile. - * `--skip-validation` - By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile. - -### `databricks instance-profiles edit` - Edit an instance profile. - -The only supported field to change is the optional IAM role ARN associated with -the instance profile. It is required to specify the IAM role ARN if both of -the following are true: - - * Your role name and instance profile name do not match. The name is the part - after the last slash in each ARN. - * You want to use the instance profile with [Databricks SQL Serverless](https://docs.databricks.com/sql/admin/serverless.html). - -To understand where these fields are in the AWS console, see -[Enable serverless SQL warehouses](https://docs.databricks.com/sql/admin/serverless.html). - -command is only available to admin users. - -Flags: - * `--iam-role-arn` - The AWS IAM role ARN of the role associated with the instance profile. - * `--is-meta-instance-profile` - By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile. - -### `databricks instance-profiles list` - List available instance profiles. - -List the instance profiles that the calling user can use to launch a cluster. - -command is available to all users. - -### `databricks instance-profiles remove` - Remove the instance profile. - -Remove the instance profile with the provided ARN. -Existing clusters with this instance profile will continue to function. - -command is only accessible to admin users. - -## `databricks ip-access-lists` - enable admins to configure IP access lists. 
- -IP Access List enables admins to configure IP access lists. - -IP access lists affect web application access and commands access to this workspace only. -If the feature is disabled for a workspace, all access is allowed for this workspace. -There is support for allow lists (inclusion) and block lists (exclusion). - -When a connection is attempted: - 1. **First, all block lists are checked.** If the connection IP address matches any block list, the connection is rejected. - 2. **If the connection was not rejected by block lists**, the IP address is compared with the allow lists. - -If there is at least one allow list for the workspace, the connection is allowed only if the IP address matches an allow list. -If there are no allow lists for the workspace, all IP addresses are allowed. - -For all allow lists and block lists combined, the workspace supports a maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. - -After changes to the IP access list feature, it can take a few minutes for changes to take effect. - -### `databricks ip-access-lists create` - Create access list. - -Creates an IP access list for this workspace. - -A list can be an allow list or a block list. -See the top of this file for a description of how the server treats allow lists and block lists at runtime. - -When creating or updating an IP access list: - - * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, - where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. - * If the new list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. - -It can take a few minutes for the changes to take effect. -**Note**: Your new IP access list has no effect until you enable the feature. See :method:workspaceconf/setStatus - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks ip-access-lists delete` - Delete access list. - -Deletes an IP access list, specified by its list ID. - -### `databricks ip-access-lists get` - Get access list. - -Gets an IP access list, specified by its list ID. - -### `databricks ip-access-lists list` - Get access lists. - -Gets all IP access lists for the specified workspace. - -### `databricks ip-access-lists replace` - Replace access list. - -Replaces an IP access list, specified by its ID. - -A list can include allow lists and block lists. See the top -of this file for a description of how the server treats allow lists and block lists at run time. When -replacing an IP access list: - * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, - where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` - value `QUOTA_EXCEEDED`. - * If the resulting list would block the calling user's current IP, error 400 is returned with `error_code` - value `INVALID_STATE`. -It can take a few minutes for the changes to take effect. Note that your resulting IP access list has no -effect until you enable the feature. See :method:workspaceconf/setStatus. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--list-id` - Universally unique identifier (UUID) of the IP access list. - -### `databricks ip-access-lists update` - Update access list. - -Updates an existing IP access list, specified by its ID. - -A list can include allow lists and block lists. 
-See the top of this file for a description of how the server treats allow lists and block lists at run time. - -When updating an IP access list: - - * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, - where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. - * If the updated list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. - -It can take a few minutes for the changes to take effect. Note that your resulting IP access list has no effect until you enable -the feature. See :method:workspaceconf/setStatus. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--list-id` - Universally unique identifier (UUID) of the IP access list. - -## `databricks account ip-access-lists` - The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. - -The Accounts IP Access List API enables account admins to configure IP access lists for -access to the account console. - -Account IP Access Lists affect web application access and commands access to the account -console and account APIs. If the feature is disabled for the account, all access is allowed -for this account. There is support for allow lists (inclusion) and block lists (exclusion). - -When a connection is attempted: - 1. **First, all block lists are checked.** If the connection IP address matches any block - list, the connection is rejected. - 2. **If the connection was not rejected by block lists**, the IP address is compared with - the allow lists. - -If there is at least one allow list for the account, the connection is allowed only if the -IP address matches an allow list. If there are no allow lists for the account, all IP -addresses are allowed. - -For all allow lists and block lists combined, the account supports a maximum of 1000 IP/CIDR -values, where one CIDR counts as a single value. - -After changes to the account-level IP access lists, it can take a few minutes for changes -to take effect. - -### `databricks account ip-access-lists create` - Create access list. - -Creates an IP access list for the account. - -A list can be an allow list or a block list. See the top of this file for a description of -how the server treats allow lists and block lists at runtime. - -When creating or updating an IP access list: - - * For all allow lists and block lists combined, the API supports a maximum of 1000 - IP/CIDR values, where one CIDR counts as a single value. Attempts to exceed that number - return error 400 with `error_code` value `QUOTA_EXCEEDED`. - * If the new list would block the calling user's current IP, error 400 is returned with - `error_code` value `INVALID_STATE`. - -It can take a few minutes for the changes to take effect. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account ip-access-lists delete` - Delete access list. - -Deletes an IP access list, specified by its list ID. - -### `databricks account ip-access-lists get` - Get IP access list. - -Gets an IP access list, specified by its list ID. - -### `databricks account ip-access-lists list` - Get access lists. - -Gets all IP access lists for the specified account. - -### `databricks account ip-access-lists replace` - Replace access list. - -Replaces an IP access list, specified by its ID. 
- -A list can include allow lists and block lists. See the top of this file for a description -of how the server treats allow lists and block lists at run time. When replacing an IP -access list: - * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, - where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` - value `QUOTA_EXCEEDED`. - * If the resulting list would block the calling user's current IP, error 400 is returned with `error_code` - value `INVALID_STATE`. -It can take a few minutes for the changes to take effect. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--list-id` - Universally unique identifier (UUID) of the IP access list. - -### `databricks account ip-access-lists update` - Update access list. - -Updates an existing IP access list, specified by its ID. - -A list can include allow lists and block lists. See the top of this file for a description -of how the server treats allow lists and block lists at run time. - -When updating an IP access list: - - * For all allow lists and block lists combined, the API supports a maximum of 1000 - IP/CIDR values, where one CIDR counts as a single value. Attempts to exceed that number - return error 400 with `error_code` value `QUOTA_EXCEEDED`. - * If the updated list would block the calling user's current IP, error 400 is returned - with `error_code` value `INVALID_STATE`. - -It can take a few minutes for the changes to take effect. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--list-id` - Universally unique identifier (UUID) of the IP access list. - -## `databricks jobs` - Manage Databricks Workflows. - -You can use a Databricks job to run a data processing or data analysis task in a Databricks -cluster with scalable resources. Your job can consist of a single task or can be a large, -multi-task workflow with complex dependencies. Databricks manages the task orchestration, -cluster management, monitoring, and error reporting for all of your jobs. You can run your -jobs immediately or periodically through an easy-to-use scheduling system. You can implement -job tasks using notebooks, JARS, Delta Live Tables pipelines, or Python, Scala, Spark -submit, and Java applications. - -You should never hard code secrets or store them in plain text. Use the :service:secrets to manage secrets in the -[Databricks CLI](https://docs.databricks.com/dev-tools/cli/index.html). -Use the [Secrets utility](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-secrets) to reference secrets in notebooks and jobs. - -### `databricks jobs cancel-all-runs` - Cancel all runs of a job. - -Cancels all active runs of a job. The runs are canceled asynchronously, so it doesn't -prevent new runs from being started. - -### `databricks jobs cancel-run` - Cancel a job run. - -Cancels a job run. The run is canceled asynchronously, so it may still be running when -this request completes. - -Flags: - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state. - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state. - -### `databricks jobs create` - Create a new job. - -Create a new job. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--format` - Used to tell what is the format of the job. - * `--max-concurrent-runs` - An optional maximum allowed number of concurrent runs of the job. 
 - * `--name` - An optional name for the job.
 - * `--timeout-seconds` - An optional timeout applied to each run of this job.
-
-### `databricks jobs delete` - Delete a job.
-
-Deletes a job.
-
-### `databricks jobs delete-run` - Delete a job run.
-
-Deletes a non-active run. Returns an error if the run is active.
-
-### `databricks jobs export-run` - Export and retrieve a job run.
-
-Export and retrieve the job run task.
-
-Flags:
 - * `--views-to-export` - Which views to export (CODE, DASHBOARDS, or ALL).
-
-### `databricks jobs get` - Get a single job.
-
-Retrieves the details for a single job.
-
-### `databricks jobs get-run` - Get a single job run.
-
-Retrieve the metadata of a run.
-
-Flags:
 - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state.
 - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state.
 - * `--include-history` - Whether to include the repair history in the response.
-
-### `databricks jobs get-run-output` - Get the output for a single run.
-
-Retrieve the output and metadata of a single task run. When a notebook task returns
-a value through the `dbutils.notebook.exit()` call, you can use this endpoint to retrieve
-that value. Databricks restricts this command to returning the first 5 MB of the output.
-To return a larger result, you can store job results in a cloud storage service.
-
-This endpoint validates that the __run_id__ parameter is valid and returns an HTTP status
-code 400 if the __run_id__ parameter is invalid. Runs are automatically removed after
-60 days. If you want to reference them beyond 60 days, you must save old run results
-before they expire.
-
-### `databricks jobs list` - List all jobs.
-
-Retrieves a list of jobs.
-
-Flags:
 - * `--expand-tasks` - Whether to include task and cluster details in the response.
 - * `--limit` - The number of jobs to return.
 - * `--name` - A filter on the list based on the exact (case insensitive) job name.
 - * `--offset` - The offset of the first job to return, relative to the most recently created job.
-
-### `databricks jobs list-runs` - List runs for a job.
-
-List runs in descending order by start time.
-
-Flags:
 - * `--active-only` - If active_only is `true`, only active runs are included in the results; otherwise, lists both active and completed runs.
 - * `--completed-only` - If completed_only is `true`, only completed runs are included in the results; otherwise, lists both active and completed runs.
 - * `--expand-tasks` - Whether to include task and cluster details in the response.
 - * `--job-id` - The job for which to list runs.
 - * `--limit` - The number of runs to return.
 - * `--offset` - The offset of the first run to return, relative to the most recent run.
 - * `--run-type` - The type of runs to return.
 - * `--start-time-from` - Show runs that started _at or after_ this value.
 - * `--start-time-to` - Show runs that started _at or before_ this value.
-
-### `databricks jobs repair-run` - Repair a job run.
-
-Re-run one or more tasks. Tasks are re-run as part of the original job run.
-They use the current job and task settings, and can be viewed in the history for the
-original job run.
-
-Flags:
 - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state.
 - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state.
 - * `--json` - either inline JSON string or @path/to/file.json with request body
 - * `--latest-repair-id` - The ID of the latest repair.
 - * `--rerun-all-failed-tasks` - If true, repair all failed tasks.
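-
-As an illustrative sketch only (the run ID is a placeholder, and the request body fields follow the Jobs API request shape, which this reference does not spell out), repairing a run by re-running every failed task might look like:
-
-```
-databricks jobs repair-run --json '{
-  "run_id": 455644833,
-  "rerun_all_failed_tasks": true
-}'
-```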
- -### `databricks jobs reset` - Overwrites all settings for a job. - -Overwrites all the settings for a specific job. Use the Update endpoint to update job settings partially. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks jobs run-now` - Trigger a new job run. - -Run a job and return the `run_id` of the triggered run. - -Flags: - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state. - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--idempotency-token` - An optional token to guarantee the idempotency of job run requests. - -### `databricks jobs submit` - Create and trigger a one-time run. - -Submit a one-time run. This endpoint allows you to submit a workload directly without -creating a job. Runs submitted using this endpoint don’t display in the UI. Use the -`jobs/runs/get` API to check the run state after the job is submitted. - -Flags: - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state. - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--idempotency-token` - An optional token that can be used to guarantee the idempotency of job run requests. - * `--run-name` - An optional name for the run. - * `--timeout-seconds` - An optional timeout applied to each run of this job. - -### `databricks jobs update` - Partially updates a job. - -Add, update, or remove specific settings of an existing job. Use the ResetJob to overwrite all job settings. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks libraries` - Manage libraries on a cluster. - -The Libraries API allows you to install and uninstall libraries and get the status of -libraries on a cluster. - -To make third-party or custom code available to notebooks and jobs running on your clusters, -you can install a library. Libraries can be written in Python, Java, Scala, and R. You can -upload Java, Scala, and Python libraries and point to external packages in PyPI, Maven, and -CRAN repositories. - -Cluster libraries can be used by all notebooks running on a cluster. You can install a cluster -library directly from a public repository such as PyPI or Maven, using a previously installed -workspace library, or using an init script. - -When you install a library on a cluster, a notebook already attached to that cluster will not -immediately see the new library. You must first detach and then reattach the notebook to -the cluster. - -When you uninstall a library from a cluster, the library is removed only when you restart -the cluster. Until you restart the cluster, the status of the uninstalled library appears -as Uninstall pending restart. - -### `databricks libraries all-cluster-statuses` - Get all statuses. - -Get the status of all libraries on all clusters. A status will be available for all libraries installed on this cluster -via the API or the libraries UI as well as libraries set to be installed on all clusters via the libraries UI. - -### `databricks libraries cluster-status` - Get status. - -Get the status of libraries on a cluster. A status will be available for all libraries installed on this cluster via the API -or the libraries UI as well as libraries set to be installed on all clusters via the libraries UI. -The order of returned libraries will be as follows. 
- -1. Libraries set to be installed on this cluster will be returned first. - Within this group, the final order will be order in which the libraries were added to the cluster. - -2. Libraries set to be installed on all clusters are returned next. - Within this group there is no order guarantee. - -3. Libraries that were previously requested on this cluster or on all clusters, but now marked for removal. - Within this group there is no order guarantee. - -### `databricks libraries install` - Add a library. - -Add libraries to be installed on a cluster. -The installation is asynchronous; it happens in the background after the completion of this request. - -**Note**: The actual set of libraries to be installed on a cluster is the union of the libraries specified via this method and -the libraries set to be installed on all clusters via the libraries UI. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks libraries uninstall` - Uninstall libraries. - -Set libraries to be uninstalled on a cluster. The libraries won't be uninstalled until the cluster is restarted. -Uninstalling libraries that are not installed on the cluster will have no impact but is not an error. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks account log-delivery` - These commands manage log delivery configurations for this account. - -These commands manage log delivery configurations for this account. The two supported log types -for command are _billable usage logs_ and _audit logs_. This feature is in Public Preview. -This feature works with all account ID types. - -Log delivery works with all account types. However, if your account is on the E2 version of -the platform or on a select custom plan that allows multiple workspaces per account, you can -optionally configure different storage destinations for each workspace. Log delivery status -is also provided to know the latest status of log delivery attempts. -The high-level flow of billable usage delivery: - -1. **Create storage**: In AWS, [create a new AWS S3 bucket](https://docs.databricks.com/administration-guide/account-api/aws-storage.html) -with a specific bucket policy. Using Databricks APIs, call the Account API to create a [storage configuration object](#operation/create-storage-config) -that uses the bucket name. -2. **Create credentials**: In AWS, create the appropriate AWS IAM role. For full details, -including the required IAM role policies and trust relationship, see -[Billable usage log delivery](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html). -Using Databricks APIs, call the Account API to create a [credential configuration object](#operation/create-credential-config) -that uses the IAM role's ARN. -3. **Create log delivery configuration**: Using Databricks APIs, call the Account API to -[create a log delivery configuration](#operation/create-log-delivery-config) that uses -the credential and storage configuration objects from previous steps. You can specify if -the logs should include all events of that log type in your account (_Account level_ delivery) -or only events for a specific set of workspaces (_workspace level_ delivery). Account level -log delivery applies to all current and future workspaces plus account level logs, while -workspace level log delivery solely delivers logs related to the specified workspaces. -You can create multiple types of delivery configurations per account. 
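-
-As an illustrative sketch of step 3, a billable usage configuration could be created with the
-`databricks account log-delivery create` command described below. The field names are assumed to
-follow the Account API request body, and the IDs are placeholders for the objects created in
-steps 1 and 2.
-
-```shell
-# Deliver billable usage CSVs for all current and future workspaces in the account.
-databricks account log-delivery create --json '{
-  "log_delivery_configuration": {
-    "log_type": "BILLABLE_USAGE",
-    "output_format": "CSV",
-    "credentials_id": "<credential-configuration-id>",
-    "storage_configuration_id": "<storage-configuration-id>"
-  }
-}'
-```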
-
-For billable usage delivery:
-* For more information about billable usage logs, see
-[Billable usage log delivery](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html).
-For the CSV schema, see the [Usage page](https://docs.databricks.com/administration-guide/account-settings/usage.html).
-* The delivery location is `//billable-usage/csv/`, where `` is
-the name of the optional delivery path prefix you set up during log delivery configuration.
-Files are named `workspaceId=-usageMonth=.csv`.
-* All billable usage logs apply to specific workspaces (_workspace level_ logs). You can
-aggregate usage for your entire account by creating an _account level_ delivery
-configuration that delivers logs for all current and future workspaces in your account.
-* The files are delivered daily by overwriting the month's CSV file for each workspace.
-
-For audit log delivery:
-* For more information about audit log delivery, see
-[Audit log delivery](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html),
-which includes information about the JSON schema used.
-* The delivery location is `//workspaceId=/date=/auditlogs_.json`.
-Files may get overwritten with the same content multiple times to achieve exactly-once delivery.
-* If the audit log delivery configuration included specific workspace IDs, only
-_workspace-level_ audit logs for those workspaces are delivered. If the log delivery
-configuration applies to the entire account (_account level_ delivery configuration),
-the audit log delivery includes workspace-level audit logs for all workspaces in the account
-as well as account-level audit logs. See
-[Audit log delivery](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html) for details.
-* Auditable events are typically available in logs within 15 minutes.
-
-### `databricks account log-delivery create` - Create a new log delivery configuration.
-
-Creates a new Databricks log delivery configuration to enable delivery of the specified type of logs to your storage location. This requires that you already created a [credential object](#operation/create-credential-config) (which encapsulates a cross-account service IAM role) and a [storage configuration object](#operation/create-storage-config) (which encapsulates an S3 bucket).
-
-For full details, including the required IAM role policies and bucket policies, see [Deliver and access billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) or [Configure audit logging](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).
-
-**Note**: There is a limit on the number of log delivery configurations available per account (each limit applies separately to each log type including billable usage and audit logs). You can create a maximum of two enabled account-level delivery configurations (configurations without a workspace filter) per type. Additionally, you can create two enabled workspace-level delivery configurations per workspace for each log type, which means that the same workspace ID can occur in the workspace filter for no more than two delivery configurations per log type.
-
-You cannot delete a log delivery configuration, but you can disable it (see [Enable or disable log delivery configuration](#operation/patch-log-delivery-config-status)).
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-### `databricks account log-delivery get` - Get log delivery configuration.
-
-Gets a Databricks log delivery configuration object for an account, both specified by ID.
-
-### `databricks account log-delivery list` - Get all log delivery configurations.
-
-Gets all Databricks log delivery configurations associated with an account specified by ID.
-
-Flags:
- * `--credentials-id` - Filter by credential configuration ID.
- * `--status` - Filter by status `ENABLED` or `DISABLED`.
- * `--storage-configuration-id` - Filter by storage configuration ID.
-
-### `databricks account log-delivery patch-status` - Enable or disable log delivery configuration.
-
-Enables or disables a log delivery configuration. Deletion of delivery configurations is not supported, so disable log delivery configurations that are no longer needed. Note that you can't re-enable a delivery configuration if this would violate the delivery configuration limits described under [Create log delivery](#operation/create-log-delivery-config).
-
-## `databricks account metastore-assignments` - These commands manage metastore assignments to a workspace.
-
-These commands manage metastore assignments to a workspace.
-
-### `databricks account metastore-assignments create` - Assigns a workspace to a metastore.
-
-Creates an assignment to a metastore for a workspace.
-
-### `databricks account metastore-assignments delete` - Delete a metastore assignment.
-
-Deletes a metastore assignment to a workspace, leaving the workspace with no metastore.
-
-### `databricks account metastore-assignments get` - Gets the metastore assignment for a workspace.
-
-Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace
-is assigned a metastore, the mapping will be returned. If no metastore is assigned to the
-workspace, the assignment will not be found and a 404 is returned.
-
-### `databricks account metastore-assignments list` - Get all workspaces assigned to a metastore.
-
-Gets a list of all Databricks workspace IDs that have been assigned to the given metastore.
-
-### `databricks account metastore-assignments update` - Updates a metastore assignment to a workspace.
-
-Updates an assignment to a metastore for a workspace. Currently, only the default catalog
-may be updated.
-
-Flags:
- * `--default-catalog-name` - The name of the default catalog for the metastore.
- * `--metastore-id` - The unique ID of the metastore.
-
-## `databricks metastores` - Manage metastores in Unity Catalog.
-
-A metastore is the top-level container of objects in Unity Catalog. It stores data assets
-(tables and views) and the permissions that govern access to them. Databricks account admins
-can create metastores and assign them to Databricks workspaces to control which workloads
-use each metastore. For a workspace to use Unity Catalog, it must have a Unity Catalog
-metastore attached.
-
-Each metastore is configured with a root storage location in a cloud storage account.
-This storage location is used for metadata and managed table data.
-
-NOTE: This metastore is distinct from the metastore included in Databricks workspaces
-created before Unity Catalog was released. If your workspace includes a legacy Hive
-metastore, the data in that metastore is available in a catalog named hive_metastore.
-
-### `databricks metastores assign` - Create an assignment.
-
-Creates a new metastore assignment.
-If an assignment for the same __workspace_id__ exists, it will be overwritten by the new __metastore_id__ and -__default_catalog_name__. The caller must be an account admin. - -### `databricks metastores create` - Create a metastore. - -Creates a new metastore based on a provided name and storage root path. - -Flags: - * `--region` - Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). - -### `databricks metastores current` - Get metastore assignment for workspace. - -Gets the metastore assignment for the workspace being accessed. - -### `databricks metastores delete` - Delete a metastore. - -Deletes a metastore. The caller must be a metastore admin. - -Flags: - * `--force` - Force deletion even if the metastore is not empty. - -### `databricks metastores get` - Get a metastore. - -Gets a metastore that matches the supplied ID. The caller must be a metastore admin to retrieve this info. - -### `databricks metastores list` - List metastores. - -Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks metastores maintenance` - Enables or disables auto maintenance on the metastore. - -Enables or disables auto maintenance on the metastore. - -### `databricks metastores summary` - Get a metastore summary. - -Gets information about a metastore. This summary includes the storage credential, the cloud vendor, the cloud region, and the global metastore ID. - -### `databricks metastores unassign` - Delete an assignment. - -Deletes a metastore assignment. The caller must be an account administrator. - -### `databricks metastores update` - Update a metastore. - -Updates information for a specific metastore. The caller must be a metastore admin. - -Flags: - * `--delta-sharing-organization-name` - The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name. - * `--delta-sharing-recipient-token-lifetime-in-seconds` - The lifetime of delta sharing recipient token in seconds. - * `--delta-sharing-scope` - The scope of Delta Sharing enabled for the metastore. - * `--name` - The user-specified name of the metastore. - * `--owner` - The owner of the metastore. - * `--privilege-model-version` - Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`). - * `--storage-root-credential-id` - UUID of storage credential to access the metastore storage_root. - -### `databricks metastores update-assignment` - Update an assignment. - -Updates a metastore assignment. This operation can be used to update __metastore_id__ or __default_catalog_name__ -for a specified Workspace, if the Workspace is already assigned a metastore. -The caller must be an account admin to update __metastore_id__; otherwise, the caller can be a Workspace admin. - -Flags: - * `--default-catalog-name` - The name of the default catalog for the metastore. - * `--metastore-id` - The unique ID of the metastore. - -## `databricks account metastores` - These commands manage Unity Catalog metastores for an account. - -These commands manage Unity Catalog metastores for an account. A metastore contains catalogs -that can be associated with workspaces - -### `databricks account metastores create` - Create metastore. - -Creates a Unity Catalog metastore. - -Flags: - * `--region` - Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). 
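-
-For example, creating an account-level metastore might look like the sketch below. The positional
-argument is an assumption (the command may require other inputs instead); check
-`databricks account metastores create --help` for the exact form.
-
-```shell
-# Create a Unity Catalog metastore named "my-metastore" (placeholder) in us-west-2.
-databricks account metastores create my-metastore --region us-west-2
-```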
- -### `databricks account metastores delete` - Delete a metastore. - -Deletes a Databricks Unity Catalog metastore for an account, both specified by ID. - -### `databricks account metastores get` - Get a metastore. - -Gets a Databricks Unity Catalog metastore from an account, both specified by ID. - -### `databricks account metastores list` - Get all metastores associated with an account. - -Gets all Unity Catalog metastores associated with an account specified by ID. - -### `databricks account metastores update` - Update a metastore. - -Updates an existing Unity Catalog metastore. - -Flags: - * `--delta-sharing-organization-name` - The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name. - * `--delta-sharing-recipient-token-lifetime-in-seconds` - The lifetime of delta sharing recipient token in seconds. - * `--delta-sharing-scope` - The scope of Delta Sharing enabled for the metastore. - * `--name` - The user-specified name of the metastore. - * `--owner` - The owner of the metastore. - * `--privilege-model-version` - Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`). - * `--storage-root-credential-id` - UUID of storage credential to access the metastore storage_root. - -## `databricks model-registry` - Expose commands for Model Registry. - -### `databricks model-registry approve-transition-request` - Approve transition request. - -Approves a model version stage transition request. - -Flags: - * `--comment` - User-provided comment on the action. - -### `databricks model-registry create-comment` - Post a comment. - -Posts a comment on a model version. A comment can be submitted either by a user or programmatically to display -relevant information about the model. For example, test results or deployment errors. - -### `databricks model-registry create-model` - Create a model. - -Creates a new registered model with the name specified in the request body. - -Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--description` - Optional description for registered model. - -### `databricks model-registry create-model-version` - Create a model version. - -Creates a model version. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--description` - Optional description for model version. - * `--run-id` - MLflow run ID for correlation, if `source` was generated by an experiment run in MLflow tracking server. - * `--run-link` - MLflow run link - this is the exact link of the run that generated this model version, potentially hosted at another instance of MLflow. - -### `databricks model-registry create-transition-request` - Make a transition request. - -Creates a model version stage transition request. - -Flags: - * `--comment` - User-provided comment on the action. - -### `databricks model-registry create-webhook` - Create a webhook. - -**NOTE**: This endpoint is in Public Preview. - -Creates a registry webhook. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--description` - User-specified description for the webhook. - * `--model-name` - Name of the model whose events would trigger this webhook. - * `--status` - This describes an enum. - -### `databricks model-registry delete-comment` - Delete a comment. - -Deletes a comment on a model version. 
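-
-Putting the create commands above together, a minimal sketch (the model name and artifact source
-are placeholders; the request bodies are passed through the documented `--json` flag):
-
-```shell
-# Register a model, then register a version of it from a placeholder artifact location.
-databricks model-registry create-model --json '{"name": "my-model"}'
-databricks model-registry create-model-version --json '{"name": "my-model", "source": "dbfs:/tmp/my-model"}'
-```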
-
-### `databricks model-registry delete-model` - Delete a model.
-
-Deletes a registered model.
-
-### `databricks model-registry delete-model-tag` - Delete a model tag.
-
-Deletes the tag for a registered model.
-
-### `databricks model-registry delete-model-version` - Delete a model version.
-
-Deletes a model version.
-
-### `databricks model-registry delete-model-version-tag` - Delete a model version tag.
-
-Deletes a model version tag.
-
-### `databricks model-registry delete-transition-request` - Delete a transition request.
-
-Cancels a model version stage transition request.
-
-Flags:
- * `--comment` - User-provided comment on the action.
-
-### `databricks model-registry delete-webhook` - Delete a webhook.
-
-**NOTE:** This endpoint is in Public Preview.
-
-Deletes a registry webhook.
-
-Flags:
- * `--id` - Webhook ID required to delete a registry webhook.
-
-### `databricks model-registry get-latest-versions` - Get the latest version.
-
-Gets the latest version of a registered model.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-### `databricks model-registry get-model` - Get model.
-
-Get the details of a model. This is a Databricks Workspace version of the [MLflow endpoint](https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel)
-that also returns the model's Databricks Workspace ID and the permission level of the requesting user on the model.
-
-### `databricks model-registry get-model-version` - Get a model version.
-
-Get a model version.
-
-### `databricks model-registry get-model-version-download-uri` - Get a model version URI.
-
-Gets a URI to download the model version.
-
-### `databricks model-registry list-models` - List models.
-
-Lists all available registered models, up to the limit specified in __max_results__.
-
-Flags:
- * `--max-results` - Maximum number of registered models desired.
- * `--page-token` - Pagination token to go to the next page based on a previous query.
-
-### `databricks model-registry list-transition-requests` - List transition requests.
-
-Gets a list of all open stage transition requests for the model version.
-
-### `databricks model-registry list-webhooks` - List registry webhooks.
-
-**NOTE:** This endpoint is in Public Preview.
-
-Lists all registry webhooks.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--model-name` - If not specified, all webhooks associated with the specified events are listed, regardless of their associated model.
- * `--page-token` - Token indicating the page of artifact results to fetch.
-
-### `databricks model-registry reject-transition-request` - Reject a transition request.
-
-Rejects a model version stage transition request.
-
-Flags:
- * `--comment` - User-provided comment on the action.
-
-### `databricks model-registry rename-model` - Rename a model.
-
-Renames a registered model.
-
-Flags:
- * `--new-name` - If provided, updates the name for this `registered_model`.
-
-### `databricks model-registry search-model-versions` - Searches model versions.
-
-Searches for specific model versions based on the supplied __filter__.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--filter` - String filter condition, like "name='my-model-name'".
- * `--max-results` - Maximum number of models desired.
-
-### `databricks model-registry search-models` - Search models.
-
-Search for registered models based on the specified __filter__.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--filter` - String filter condition, like "name LIKE 'my-model-name'".
- * `--max-results` - Maximum number of models desired.
-
-### `databricks model-registry set-model-tag` - Set a tag.
-
-Sets a tag on a registered model.
-
-### `databricks model-registry set-model-version-tag` - Set a version tag.
-
-Sets a model version tag.
-
-### `databricks model-registry test-registry-webhook` - Test a webhook.
-
-**NOTE:** This endpoint is in Public Preview.
-
-Tests a registry webhook.
-
-Flags:
- * `--event` - If `event` is specified, the test trigger uses the specified event.
-
-### `databricks model-registry transition-stage` - Transition a stage.
-
-Transition a model version's stage. This is a Databricks Workspace version of the [MLflow endpoint](https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage)
-that also accepts a comment associated with the transition to be recorded.
-
-Flags:
- * `--comment` - User-provided comment on the action.
-
-### `databricks model-registry update-comment` - Update a comment.
-
-Post an edit to a comment on a model version.
-
-### `databricks model-registry update-model` - Update model.
-
-Updates a registered model.
-
-Flags:
- * `--description` - If provided, updates the description for this `registered_model`.
-
-### `databricks model-registry update-model-version` - Update model version.
-
-Updates the model version.
-
-Flags:
- * `--description` - If provided, updates the description for this `registered_model`.
-
-### `databricks model-registry update-webhook` - Update a webhook.
-
-**NOTE:** This endpoint is in Public Preview.
-
-Updates a registry webhook.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--description` - User-specified description for the webhook.
- * `--status` - This describes an enum.
-
-## `databricks account networks` - Manage network configurations.
-
-These commands manage network configurations for customer-managed VPCs (optional). A network configuration's ID is used when creating a new workspace if you use customer-managed VPCs.
-
-### `databricks account networks create` - Create network configuration.
-
-Creates a Databricks network configuration that represents a VPC and its resources. The VPC will be used for new Databricks clusters. This requires a pre-existing VPC and subnets.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--vpc-id` - The ID of the VPC associated with this network.
-
-### `databricks account networks delete` - Delete a network configuration.
-
-Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace.
-
-This operation is available only if your account is on the E2 version of the platform.
-
-### `databricks account networks get` - Get a network configuration.
-
-Gets a Databricks network configuration, which represents a cloud VPC and its resources.
-
-### `databricks account networks list` - Get all network configurations.
-
-Gets a list of all Databricks network configurations for an account, specified by ID.
-
-This operation is available only if your account is on the E2 version of the platform.
-
-## `databricks account o-auth-enrollment` - These commands enable administrators to enroll OAuth for their accounts, which is required for adding/using any OAuth published/custom application integration.
-
-These commands enable administrators to enroll OAuth for their accounts, which is required for adding/using any OAuth published/custom application integration.
-
-**Note:** Your account must be on the E2 version to use these commands; OAuth is only supported
-on the E2 version.
-
-### `databricks account o-auth-enrollment create` - Create OAuth Enrollment request.
-
-Create an OAuth Enrollment request to enroll OAuth for this account and optionally enable
-the OAuth integration for all the partner applications in the account.
-
-The partner applications are:
- - Power BI
- - Tableau Desktop
- - Databricks CLI
-
-The enrollment is executed asynchronously, so the API returns 204 immediately. The
-actual enrollment takes a few minutes; you can check the status with `databricks account o-auth-enrollment get`.
-
-Flags:
- * `--enable-all-published-apps` - If true, enable OAuth for all the published applications in the account.
-
-### `databricks account o-auth-enrollment get` - Get OAuth enrollment status.
-
-Gets the OAuth enrollment status for this Account.
-
-You can only add/use the OAuth published/custom application integrations when OAuth enrollment
-status is enabled.
-
-## `databricks permissions` - Manage access for various users on different objects and endpoints.
-
-The Permissions API is used to create read, write, edit, update, and manage access for various
-users on different objects and endpoints.
-
-### `databricks permissions get` - Get object permissions.
-
-Gets the permissions of an object. Objects can inherit permissions from their parent objects or root objects.
-
-### `databricks permissions get-permission-levels` - Get permission levels.
-
-Gets the permission levels that a user can have on an object.
-
-### `databricks permissions set` - Set permissions.
-
-Sets permissions on an object. Objects can inherit permissions from their parent objects and root objects.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-### `databricks permissions update` - Update permission.
-
-Updates the permissions on an object.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-## `databricks pipelines` - Manage Delta Live Tables from the command line.
-
-The Delta Live Tables API allows you to create, edit, delete, start, and view details about
-pipelines.
-
-Delta Live Tables is a framework for building reliable, maintainable, and testable data
-processing pipelines. You define the transformations to perform on your data, and Delta Live
-Tables manages task orchestration, cluster management, monitoring, data quality, and error
-handling.
-
-Instead of defining your data pipelines using a series of separate Apache Spark tasks, Delta
-Live Tables manages how your data is transformed based on a target schema you define for each
-processing step. You can also enforce data quality with Delta Live Tables expectations.
-Expectations allow you to define expected data quality and specify how to handle records that
-fail those expectations.
-
-### `databricks pipelines create` - Create a pipeline.
-
-Creates a new data processing pipeline based on the requested configuration. If successful, this method returns
-the ID of the new pipeline.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--allow-duplicate-names` - If false, deployment will fail if name conflicts with that of another pipeline.
- * `--catalog` - A catalog in Unity Catalog to publish data from this pipeline to.
- * `--channel` - DLT Release Channel that specifies which version to use. - * `--continuous` - Whether the pipeline is continuous or triggered. - * `--development` - Whether the pipeline is in Development mode. - * `--dry-run` - - * `--edition` - Pipeline product edition. - * `--id` - Unique identifier for this pipeline. - * `--name` - Friendly identifier for this pipeline. - * `--photon` - Whether Photon is enabled for this pipeline. - * `--storage` - DBFS root directory for storing checkpoints and tables. - * `--target` - Target schema (database) to add tables in this pipeline to. - -### `databricks pipelines delete` - Delete a pipeline. - -Deletes a pipeline. - -### `databricks pipelines get` - Get a pipeline. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks pipelines get-update` - Get a pipeline update. - -Gets an update from an active pipeline. - -### `databricks pipelines list-pipeline-events` - List pipeline events. - -Retrieves events for a pipeline. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--filter` - Criteria to select a subset of results, expressed using a SQL-like syntax. - * `--max-results` - Max number of entries to return in a single page. - * `--page-token` - Page token returned by previous call. - -### `databricks pipelines list-pipelines` - List pipelines. - -Lists pipelines defined in the Delta Live Tables system. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--filter` - Select a subset of results based on the specified criteria. - * `--max-results` - The maximum number of entries to return in a single page. - * `--page-token` - Page token returned by previous call. - -### `databricks pipelines list-updates` - List pipeline updates. - -List updates for an active pipeline. - -Flags: - * `--max-results` - Max number of entries to return in a single page. - * `--page-token` - Page token returned by previous call. - * `--until-update-id` - If present, returns updates until and including this update_id. - -### `databricks pipelines reset` - Reset a pipeline. - -Resets a pipeline. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks pipelines start-update` - Queue a pipeline update. - -Starts or queues a pipeline update. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--cause` - - * `--full-refresh` - If true, this update will reset all tables before running. - -### `databricks pipelines stop` - Stop a pipeline. - -Stops a pipeline. - -Flags: - * `--no-wait` - do not wait to reach IDLE state. - * `--timeout` - maximum amount of time to reach IDLE state. - -### `databricks pipelines update` - Edit a pipeline. - -Updates a pipeline with the supplied configuration. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--allow-duplicate-names` - If false, deployment will fail if name has changed and conflicts the name of another pipeline. - * `--catalog` - A catalog in Unity Catalog to publish data from this pipeline to. - * `--channel` - DLT Release Channel that specifies which version to use. - * `--continuous` - Whether the pipeline is continuous or triggered. - * `--development` - Whether the pipeline is in Development mode. - * `--edition` - Pipeline product edition. 
- * `--expected-last-modified` - If present, the last-modified time of the pipeline settings before the edit. - * `--id` - Unique identifier for this pipeline. - * `--name` - Friendly identifier for this pipeline. - * `--photon` - Whether Photon is enabled for this pipeline. - * `--pipeline-id` - Unique identifier for this pipeline. - * `--storage` - DBFS root directory for storing checkpoints and tables. - * `--target` - Target schema (database) to add tables in this pipeline to. - -## `databricks policy-families` - View available policy families. - -View available policy families. A policy family contains a policy definition providing best -practices for configuring clusters for a particular use case. - -Databricks manages and provides policy families for several common cluster use cases. You -cannot create, edit, or delete policy families. - -Policy families cannot be used directly to create clusters. Instead, you create cluster -policies using a policy family. Cluster policies created using a policy family inherit the -policy family's policy definition. - -### `databricks policy-families get` - get cluster policy family. - -Do it. - -### `databricks policy-families list` - list policy families. - -Flags: - * `--max-results` - The max number of policy families to return. - * `--page-token` - A token that can be used to get the next page of results. - -## `databricks account private-access` - PrivateLink settings. - -These commands manage private access settings for this account. - -### `databricks account private-access create` - Create private access settings. - -Creates a private access settings object, which specifies how your workspace is -accessed over [AWS PrivateLink](https://aws.amazon.com/privatelink). To use AWS -PrivateLink, a workspace must have a private access settings object referenced -by ID in the workspace's `private_access_settings_id` property. - -You can share one private access settings with multiple workspaces in a single account. However, -private access settings are specific to AWS regions, so only workspaces in the same -AWS region can use a given private access settings object. - -Before configuring PrivateLink, read the -[Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--private-access-level` - The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. - * `--public-access-enabled` - Determines if the workspace can be accessed over public internet. - -### `databricks account private-access delete` - Delete a private access settings object. - -Deletes a private access settings object, which determines how your workspace is accessed over [AWS PrivateLink](https://aws.amazon.com/privatelink). - -Before configuring PrivateLink, read the [Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -### `databricks account private-access get` - Get a private access settings object. - -Gets a private access settings object, which specifies how your workspace is accessed over [AWS PrivateLink](https://aws.amazon.com/privatelink). - -Before configuring PrivateLink, read the [Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). 
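-
-For reference, creating the private access settings object described above might look like the
-following sketch. The field names are assumed from the Account API request body and the values
-are placeholders.
-
-```shell
-# Create a private access settings object that disallows public internet access.
-databricks account private-access create --json '{
-  "private_access_settings_name": "my-private-access-settings",
-  "region": "us-west-2",
-  "public_access_enabled": false
-}'
-```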
- -### `databricks account private-access list` - Get all private access settings objects. - -Gets a list of all private access settings objects for an account, specified by ID. - -### `databricks account private-access replace` - Replace private access settings. - -Updates an existing private access settings object, which specifies how your workspace is -accessed over [AWS PrivateLink](https://aws.amazon.com/privatelink). To use AWS -PrivateLink, a workspace must have a private access settings object referenced by ID in -the workspace's `private_access_settings_id` property. - -This operation completely overwrites your existing private access settings object attached to your workspaces. -All workspaces attached to the private access settings are affected by any change. -If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` -are updated, effects of these changes might take several minutes to propagate to the -workspace API. - -You can share one private access settings object with multiple -workspaces in a single account. However, private access settings are specific to -AWS regions, so only workspaces in the same AWS region can use a given private access -settings object. - -Before configuring PrivateLink, read the -[Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--private-access-level` - The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. - * `--public-access-enabled` - Determines if the workspace can be accessed over public internet. - -## `databricks providers` - Delta Sharing Providers commands. - -Databricks Providers commands - -### `databricks providers create` - Create an auth provider. - -Creates a new authentication provider minimally based on a name and authentication type. -The caller must be an admin on the metastore. - -Flags: - * `--comment` - Description about the provider. - * `--recipient-profile-str` - This field is required when the __authentication_type__ is **TOKEN** or not provided. - -### `databricks providers delete` - Delete a provider. - -Deletes an authentication provider, if the caller is a metastore admin or is the owner of the provider. - -### `databricks providers get` - Get a provider. - -Gets a specific authentication provider. The caller must supply the name of the provider, and must either be a metastore admin or the owner of the provider. - -### `databricks providers list` - List providers. - -Gets an array of available authentication providers. -The caller must either be a metastore admin or the owner of the providers. -Providers not owned by the caller are not included in the response. -There is no guarantee of a specific ordering of the elements in the array. - -Flags: - * `--data-provider-global-metastore-id` - If not provided, all providers will be returned. - -### `databricks providers list-shares` - List shares by Provider. - -Gets an array of a specified provider's shares within the metastore where: - - * the caller is a metastore admin, or - * the caller is the owner. - -### `databricks providers update` - Update a provider. - -Updates the information for an authentication provider, if the caller is a metastore admin or is the owner of the provider. 
-If the update changes the provider name, the caller must be both a metastore admin and the owner of the provider. - -Flags: - * `--comment` - Description about the provider. - * `--name` - The name of the Provider. - * `--owner` - Username of Provider owner. - * `--recipient-profile-str` - This field is required when the __authentication_type__ is **TOKEN** or not provided. - -## `databricks account published-app-integration` - manage published OAuth app integrations like Tableau Cloud for Databricks in AWS cloud. - -These commands enable administrators to manage published oauth app integrations, which is required for -adding/using Published OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. - -**Note:** You can only add/use the OAuth published application integrations when OAuth enrollment -status is enabled. - -### `databricks account published-app-integration create` - Create Published OAuth App Integration. - -Create Published OAuth App Integration. - -You can retrieve the published oauth app integration via :method:get. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--app-id` - app_id of the oauth published app integration. - -### `databricks account published-app-integration delete` - Delete Published OAuth App Integration. - -Delete an existing Published OAuth App Integration. -You can retrieve the published oauth app integration via :method:get. - -### `databricks account published-app-integration get` - Get OAuth Published App Integration. - -Gets the Published OAuth App Integration for the given integration id. - -### `databricks account published-app-integration list` - Get published oauth app integrations. - -Get the list of published oauth app integrations for the specified Databricks Account - -### `databricks account published-app-integration update` - Updates Published OAuth App Integration. - -Updates an existing published OAuth App Integration. You can retrieve the published oauth app integration via :method:get. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks queries` - These endpoints are used for CRUD operations on query definitions. - -These endpoints are used for CRUD operations on query definitions. Query definitions include -the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. - -### `databricks queries create` - Create a new query definition. - -Creates a new query definition. Queries created with this endpoint belong to the authenticated user making the request. - -The `data_source_id` field specifies the ID of the SQL warehouse to run this query against. You can use the Data Sources API to see a complete list of available SQL warehouses. Or you can copy the `data_source_id` from an existing query. - -**Note**: You cannot add a visualization until you create the query. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--data-source-id` - The ID of the data source / SQL warehouse where this query will run. - * `--description` - General description that can convey additional information about this query such as usage notes. - * `--name` - The name or title of this query to display in list views. - * `--parent` - The identifier of the workspace folder containing the query. - * `--query` - The text of the query. - -### `databricks queries delete` - Delete a query. - -Moves a query to the trash. 
-Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. -The trash is deleted after 30 days. - -### `databricks queries get` - Get a query definition. - -Retrieve a query object definition along with contextual permissions information about the currently authenticated user. - -### `databricks queries list` - Get a list of queries. - -Gets a list of queries. Optionally, this list can be filtered by a search term. - -Flags: - * `--order` - Name of query attribute to order by. - * `--page` - Page number to retrieve. - * `--page-size` - Number of queries to return per page. - * `--q` - Full text search term. - -### `databricks queries restore` - Restore a query. - -Restore a query that has been moved to the trash. -A restored query appears in list views and searches. You can use restored queries for alerts. - -### `databricks queries update` - Change a query definition. - -Modify this query definition. - -**Note**: You cannot undo this operation. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--data-source-id` - The ID of the data source / SQL warehouse where this query will run. - * `--description` - General description that can convey additional information about this query such as usage notes. - * `--name` - The name or title of this query to display in list views. - * `--query` - The text of the query. - -## `databricks query-history` - Access the history of queries through SQL warehouses. - -Access the history of queries through SQL warehouses. - -### `databricks query-history list` - List Queries. - -List the history of queries through SQL warehouses. You can filter by user ID, warehouse ID, status, and time range. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--include-metrics` - Whether to include metrics about query. - * `--max-results` - Limit the number of results returned in one page. - * `--page-token` - A token that can be used to get the next page of results. - -## `databricks recipient-activation` - Delta Sharing recipient activation commands. - -Databricks Recipient Activation commands - -### `databricks recipient-activation get-activation-url-info` - Get a share activation URL. - -Gets an activation URL for a share. - -### `databricks recipient-activation retrieve-token` - Get an access token. - -Retrieve access token with an activation url. This is a public API without any authentication. - -## `databricks recipients` - Delta Sharing recipients. - -Databricks Recipients commands - -### `databricks recipients create` - Create a share recipient. - -Creates a new recipient with the delta sharing authentication type in the metastore. -The caller must be a metastore admin or has the **CREATE_RECIPIENT** privilege on the metastore. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - Description about the recipient. - * `--owner` - Username of the recipient owner. - * `--sharing-code` - The one-time sharing code provided by the data recipient. - -### `databricks recipients delete` - Delete a share recipient. - -Deletes the specified recipient from the metastore. The caller must be the owner of the recipient. - -### `databricks recipients get` - Get a share recipient. - -Gets a share recipient from the metastore if: - - * the caller is the owner of the share recipient, or: - * is a metastore admin - -### `databricks recipients list` - List share recipients. 
-
-Gets an array of all share recipients within the current metastore where:
-
-  * the caller is a metastore admin, or
-  * the caller is the owner.
-
-There is no guarantee of a specific ordering of the elements in the array.
-
-Flags:
- * `--data-recipient-global-metastore-id` - If not provided, all recipients will be returned.
-
-### `databricks recipients rotate-token` - Rotate a token.
-
-Refreshes the specified recipient's delta sharing authentication token with the provided token info.
-The caller must be the owner of the recipient.
-
-### `databricks recipients share-permissions` - Get recipient share permissions.
-
-Gets the share permissions for the specified Recipient. The caller must be a metastore admin or the owner of the Recipient.
-
-### `databricks recipients update` - Update a share recipient.
-
-Updates an existing recipient in the metastore. The caller must be a metastore admin or the owner of the recipient.
-If the recipient name is updated, the user must be both a metastore admin and the owner of the recipient.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--comment` - Description about the recipient.
- * `--name` - Name of Recipient.
- * `--owner` - Username of the recipient owner.
-
-## `databricks repos` - Manage Git repos.
-
-The Repos API allows users to manage their Git repos. Users can use the API to access all
-repos that they have manage permissions on.
-
-Databricks Repos is a visual Git client in Databricks. It supports common Git operations
-such as cloning a repository, committing and pushing, pulling, branch management, and visual
-comparison of diffs when committing.
-
-Within Repos you can develop code in notebooks or other files and follow data science and
-engineering code development best practices using Git for version control, collaboration,
-and CI/CD.
-
-### `databricks repos create` - Create a repo.
-
-Creates a repo in the workspace and links it to the remote Git repo specified.
-Note that repos created programmatically must be linked to a remote Git repo, unlike repos created in the browser.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--path` - Desired path for the repo in the workspace.
-
-### `databricks repos delete` - Delete a repo.
-
-Deletes the specified repo.
-
-### `databricks repos get` - Get a repo.
-
-Returns the repo with the given repo ID.
-
-### `databricks repos list` - Get repos.
-
-Returns repos that the calling user has Manage permissions on. Results are paginated with each page containing twenty repos.
-
-Flags:
- * `--next-page-token` - Token used to get the next page of results.
- * `--path-prefix` - Filters repos that have paths starting with the given path prefix.
-
-### `databricks repos update` - Update a repo.
-
-Updates the repo to a different branch or tag, or updates the repo to the latest commit on the same branch.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--branch` - Branch that the local version of the repo is checked out to.
- * `--tag` - Tag that the local version of the repo is checked out to.
-
-## `databricks schemas` - Manage schemas in Unity Catalog.
-
-A schema (also called a database) is the second layer of Unity Catalog’s three-level
-namespace. A schema organizes tables, views and functions. 
To access (or list) a table or view in
-a schema, users must have the USE_SCHEMA data permission on the schema and its parent catalog,
-and they must have the SELECT permission on the table or view.
-
-### `databricks schemas create` - Create a schema.
-
-Creates a new schema for a catalog in the metastore. The caller must be a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent catalog.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--comment` - User-provided free-form text description.
- * `--storage-root` - Storage root URL for managed tables within schema.
-
-### `databricks schemas delete` - Delete a schema.
-
-Deletes the specified schema from the parent catalog. The caller must be the owner of the schema or an owner of the parent catalog.
-
-### `databricks schemas get` - Get a schema.
-
-Gets the specified schema within the metastore. The caller must be a metastore admin, the owner of the schema, or a user that has the **USE_SCHEMA** privilege on the schema.
-
-### `databricks schemas list` - List schemas.
-
-Gets an array of schemas for a catalog in the metastore. If the caller is the metastore admin or the owner of the parent catalog, all schemas for the catalog will be retrieved.
-Otherwise, only schemas owned by the caller (or for which the caller has the **USE_SCHEMA** privilege) will be retrieved.
-There is no guarantee of a specific ordering of the elements in the array.
-
-### `databricks schemas update` - Update a schema.
-
-Updates a schema for a catalog. The caller must be the owner of the schema or a metastore admin.
-If the caller is a metastore admin, only the __owner__ field can be changed in the update.
-If the __name__ field must be updated, the caller must be a metastore admin or have the **CREATE_SCHEMA** privilege on the parent catalog.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--comment` - User-provided free-form text description.
- * `--name` - Name of schema, relative to parent catalog.
- * `--owner` - Username of current owner of schema.
-
-## `databricks secrets` - Manage secrets, secret scopes, and access permissions.
-
-The Secrets API allows you to manage secrets, secret scopes, and access permissions.
-
-Sometimes accessing data requires that you authenticate to external data sources through JDBC.
-Instead of directly entering your credentials into a notebook, use Databricks secrets to store
-your credentials and reference them in notebooks and jobs.
-
-Administrators, secret creators, and users granted permission can read Databricks secrets.
-While Databricks makes an effort to redact secret values that might be displayed in notebooks,
-it is not possible to prevent such users from reading secrets.
-
-### `databricks secrets create-scope` - Create a new secret scope.
-
-The scope name must consist of alphanumeric characters, dashes, underscores, and periods,
-and may not exceed 128 characters. The maximum number of scopes in a workspace is 100.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--initial-manage-principal` - The principal that is initially granted `MANAGE` permission to the created scope.
- * `--scope-backend-type` - The backend type the scope will be created with.
-
-### `databricks secrets delete-acl` - Delete an ACL.
-
-Deletes the given ACL on the given scope.
-
-Users must have the `MANAGE` permission to invoke this command.
-Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope, principal, or ACL exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets delete-scope` - Delete a secret scope. - -Deletes a secret scope. - -Throws `RESOURCE_DOES_NOT_EXIST` if the scope does not exist. Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets delete-secret` - Delete a secret. - -Deletes the secret stored in this secret scope. You must have `WRITE` or `MANAGE` permission on the secret scope. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope or secret exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets get-acl` - Get secret ACL details. - -Gets the details about the given ACL, such as the group and permission. -Users must have the `MANAGE` permission to invoke command. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets list-acls` - Lists ACLs. - -List the ACLs for a given secret scope. Users must have the `MANAGE` permission to invoke command. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets list-scopes` - List all scopes. - -Lists all secret scopes available in the workspace. - -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets list-secrets` - List secret keys. - -Lists the secret keys that are stored at this scope. -This is a metadata-only operation; secret data cannot be retrieved using command. -Users need the READ permission to make this call. - -The lastUpdatedTimestamp returned is in milliseconds since epoch. -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets put-acl` - Create/update an ACL. - -Creates or overwrites the Access Control List (ACL) associated with the given principal (user or group) on the -specified scope point. - -In general, a user or group will use the most powerful permission available to them, -and permissions are ordered as follows: - -* `MANAGE` - Allowed to change ACLs, and read and write to this secret scope. -* `WRITE` - Allowed to read and write to this secret scope. -* `READ` - Allowed to read this secret scope and list what secrets are available. - -Note that in general, secret values can only be read from within a command on a cluster (for example, through a notebook). -There is no API to read the actual secret value material outside of a cluster. -However, the user's permission will be applied based on who is executing the command, and they must have at least READ permission. - -Users must have the `MANAGE` permission to invoke command. - -The principal is a user or group name corresponding to an existing Databricks principal to be granted or revoked access. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `RESOURCE_ALREADY_EXISTS` if a permission for the principal already exists. -Throws `INVALID_PARAMETER_VALUE` if the permission is invalid. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets put-secret` - Add a secret. 
- -Inserts a secret under the provided scope with the given name. -If a secret already exists with the same name, this command overwrites the existing secret's value. -The server encrypts the secret using the secret scope's encryption settings before storing it. - -You must have `WRITE` or `MANAGE` permission on the secret scope. -The secret key must consist of alphanumeric characters, dashes, underscores, and periods, and cannot exceed 128 characters. -The maximum allowed secret value size is 128 KB. The maximum number of secrets in a given scope is 1000. - -The input fields "string_value" or "bytes_value" specify the type of the secret, which will determine the value returned when -the secret value is requested. Exactly one must be specified. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `RESOURCE_LIMIT_EXCEEDED` if maximum number of secrets in scope is exceeded. -Throws `INVALID_PARAMETER_VALUE` if the key name or value length is invalid. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -Flags: - * `--bytes-value` - If specified, value will be stored as bytes. - * `--string-value` - If specified, note that the value will be stored in UTF-8 (MB4) form. - -## `databricks service-principals` - Manage service principals. - -Identities for use with jobs, automated tools, and systems such as scripts, apps, and -CI/CD platforms. Databricks recommends creating service principals to run production jobs -or modify production data. If all processes that act on production data run with service -principals, interactive users do not need any write, delete, or modify privileges in -production. This eliminates the risk of a user overwriting production data by accident. - -### `databricks service-principals create` - Create a service principal. - -Creates a new service principal in the Databricks Workspace. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--application-id` - UUID relating to the service principal. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks service principal ID. - -### `databricks service-principals delete` - Delete a service principal. - -Delete a single service principal in the Databricks Workspace. - -### `databricks service-principals get` - Get service principal details. - -Gets the details for a single service principal define in the Databricks Workspace. - -### `databricks service-principals list` - List service principals. - -Gets the set of service principals associated with a Databricks Workspace. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - - -### `databricks service-principals patch` - Update service principal details. - -Partially updates the details of a single service principal in the Databricks Workspace. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks service-principals update` - Replace service principal. 
- -Updates the details of a single service principal. - -This action replaces the existing service principal with the same name. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--application-id` - UUID relating to the service principal. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks service principal ID. - -## `databricks account service-principals` - Manage service principals on the account level. - -Identities for use with jobs, automated tools, and systems such as scripts, apps, and -CI/CD platforms. Databricks recommends creating service principals to run production jobs -or modify production data. If all processes that act on production data run with service -principals, interactive users do not need any write, delete, or modify privileges in -production. This eliminates the risk of a user overwriting production data by accident. - -### `databricks account service-principals create` - Create a service principal. - -Creates a new service principal in the Databricks Account. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--application-id` - UUID relating to the service principal. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks service principal ID. - -### `databricks account service-principals delete` - Delete a service principal. - -Delete a single service principal in the Databricks Account. - -### `databricks account service-principals get` - Get service principal details. - -Gets the details for a single service principal define in the Databricks Account. - -### `databricks account service-principals list` - List service principals. - -Gets the set of service principals associated with a Databricks Account. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks account service-principals patch` - Update service principal details. - -Partially updates the details of a single service principal in the Databricks Account. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account service-principals update` - Replace service principal. - -Updates the details of a single service principal. - -This action replaces the existing service principal with the same name. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--application-id` - UUID relating to the service principal. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks service principal ID. - -## `databricks serving-endpoints` - Manage model serving endpoints. - -The Serving Endpoints API allows you to create, update, and delete model serving endpoints. - -You can use a serving endpoint to serve models from the Databricks Model Registry. 
Endpoints expose -the underlying models as scalable commands endpoints using serverless compute. This means -the endpoints and associated compute resources are fully managed by Databricks and will not appear in -your cloud account. A serving endpoint can consist of one or more MLflow models from the Databricks -Model Registry, called served models. A serving endpoint can have at most ten served models. You can configure -traffic settings to define how requests should be routed to your served models behind an endpoint. -Additionally, you can configure the scale of resources that should be applied to each served model. - -### `databricks serving-endpoints build-logs` - Retrieve the logs associated with building the model's environment for a given serving endpoint's served model. - -Retrieve the logs associated with building the model's environment for a given serving endpoint's served model. - -Retrieves the build logs associated with the provided served model. - -### `databricks serving-endpoints create` - Create a new serving endpoint. - -Flags: - * `--no-wait` - do not wait to reach NOT_UPDATING state. - * `--timeout` - maximum amount of time to reach NOT_UPDATING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks serving-endpoints delete` - Delete a serving endpoint. - -Delete a serving endpoint. - -### `databricks serving-endpoints export-metrics` - Retrieve the metrics corresponding to a serving endpoint for the current time in Prometheus or OpenMetrics exposition format. - -Retrieve the metrics corresponding to a serving endpoint for the current time in Prometheus or OpenMetrics exposition format. - -Retrieves the metrics associated with the provided serving endpoint in either Prometheus or OpenMetrics exposition format. - -### `databricks serving-endpoints get` - Get a single serving endpoint. - -Retrieves the details for a single serving endpoint. - -### `databricks serving-endpoints list` - Retrieve all serving endpoints. - -Retrieve all serving endpoints. - -### `databricks serving-endpoints logs` - Retrieve the most recent log lines associated with a given serving endpoint's served model. - -Retrieves the service logs associated with the provided served model. - -### `databricks serving-endpoints query` - Query a serving endpoint with provided model input. - -Query a serving endpoint with provided model input. - -### `databricks serving-endpoints update-config` - Update a serving endpoint with a new config. - -Update a serving endpoint with a new config. - -Updates any combination of the serving endpoint's served models, the compute -configuration of those served models, and the endpoint's traffic config. -An endpoint that already has an update in progress can not be updated until -the current update completes or fails. - -Flags: - * `--no-wait` - do not wait to reach NOT_UPDATING state. - * `--timeout` - maximum amount of time to reach NOT_UPDATING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks shares` - Databricks Shares commands. - -Databricks Shares commands - -### `databricks shares create` - Create a share. - -Creates a new share for data objects. Data objects can be added after creation with **update**. -The caller must be a metastore admin or have the **CREATE_SHARE** privilege on the metastore. - -Flags: - * `--comment` - User-provided free-form text description. - -### `databricks shares delete` - Delete a share. 
- -Deletes a data object share from the metastore. The caller must be an owner of the share. - -### `databricks shares get` - Get a share. - -Gets a data object share from the metastore. The caller must be a metastore admin or the owner of the share. - -Flags: - * `--include-shared-data` - Query for data to include in the share. - -### `databricks shares list` - List shares. - -Gets an array of data object shares from the metastore. The caller must be a metastore admin or the owner of the share. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks shares share-permissions` - Get permissions. - -Gets the permissions for a data share from the metastore. -The caller must be a metastore admin or the owner of the share. - -### `databricks shares update` - Update a share. - -Updates the share with the changes and data objects in the request. -The caller must be the owner of the share or a metastore admin. - -When the caller is a metastore admin, only the __owner__ field can be updated. - -In the case that the share name is changed, **updateShare** requires that the caller is both the share owner and -a metastore admin. - -For each table that is added through this method, the share owner must also have **SELECT** privilege on the table. -This privilege must be maintained indefinitely for recipients to be able to access the table. -Typically, you should use a group as the share owner. - -Table removals through **update** do not require additional privileges. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - User-provided free-form text description. - * `--name` - Name of the share. - * `--owner` - Username of current owner of share. - -### `databricks shares update-permissions` - Update permissions. - -Updates the permissions for a data share in the metastore. -The caller must be a metastore admin or an owner of the share. - -For new recipient grants, the user must also be the owner of the recipients. -recipient revocations do not require additional privileges. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks account storage` - Manage storage configurations for this workspace. - -These commands manage storage configurations for this workspace. A root storage S3 bucket in -your account is required to store objects like cluster logs, notebook revisions, and job -results. You can also use the root storage S3 bucket for storage of non-production DBFS -data. A storage configuration encapsulates this bucket information, and its ID is used when -creating a new workspace. - -### `databricks account storage create` - Create new storage configuration. - -Creates new storage configuration for an account, specified by ID. Uploads a storage configuration object that represents the root AWS S3 bucket in your account. Databricks stores related workspace assets including DBFS, cluster logs, and job results. For the AWS S3 bucket, you need to configure the required bucket policy. - -For information about how to create a new workspace with command, see [Create a new workspace using the Account API](http://docs.databricks.com/administration-guide/account-api/new-workspace.html) - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account storage delete` - Delete storage configuration. - -Deletes a Databricks storage configuration. 
You cannot delete a storage configuration that is associated with any workspace. - -### `databricks account storage get` - Get storage configuration. - -Gets a Databricks storage configuration for an account, both specified by ID. - -### `databricks account storage list` - Get all storage configurations. - -Gets a list of all Databricks storage configurations for your account, specified by ID. - -## `databricks storage-credentials` - Manage storage credentials for Unity Catalog. - -A storage credential represents an authentication and authorization mechanism for accessing -data stored on your cloud tenant. Each storage credential is subject to -Unity Catalog access-control policies that control which users and groups can access -the credential. If a user does not have access to a storage credential in Unity Catalog, -the request fails and Unity Catalog does not attempt to authenticate to your cloud tenant -on the user’s behalf. - -Databricks recommends using external locations rather than using storage credentials -directly. - -To create storage credentials, you must be a Databricks account admin. The account admin -who creates the storage credential can delegate ownership to another user or group to -manage permissions on it. - -### `databricks storage-credentials create` - Create a storage credential. - -Creates a new storage credential. The request object is specific to the cloud: - - * **AwsIamRole** for AWS credentials - * **AzureServicePrincipal** for Azure credentials - * **GcpServiceAcountKey** for GCP credentials. - -The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - Comment associated with the credential. - * `--read-only` - Whether the storage credential is only usable for read operations. - * `--skip-validation` - Supplying true to this argument skips validation of the created credential. - -### `databricks storage-credentials delete` - Delete a credential. - -Deletes a storage credential from the metastore. The caller must be an owner of the storage credential. - -Flags: - * `--force` - Force deletion even if there are dependent external locations or external tables. - -### `databricks storage-credentials get` - Get a credential. - -Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have some permission on the storage credential. - -### `databricks storage-credentials list` - List credentials. - -Gets an array of storage credentials (as __StorageCredentialInfo__ objects). -The array is limited to only those storage credentials the caller has permission to access. -If the caller is a metastore admin, all storage credentials will be retrieved. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks storage-credentials update` - Update a credential. - -Updates a storage credential on the metastore. The caller must be the owner of the storage credential or a metastore admin. If the caller is a metastore admin, only the __owner__ credential can be changed. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - Comment associated with the credential. - * `--force` - Force update even if there are dependent external locations or external tables. - * `--name` - The credential name. - * `--owner` - Username of current owner of credential. 
- * `--read-only` - Whether the storage credential is only usable for read operations. - * `--skip-validation` - Supplying true to this argument skips validation of the updated credential. - -### `databricks storage-credentials validate` - Validate a storage credential. - -Validates a storage credential. At least one of __external_location_name__ and __url__ need to be provided. If only one of them is -provided, it will be used for validation. And if both are provided, the __url__ will be used for -validation, and __external_location_name__ will be ignored when checking overlapping urls. - -Either the __storage_credential_name__ or the cloud-specific credential must be provided. - -The caller must be a metastore admin or the storage credential owner or -have the **CREATE_EXTERNAL_LOCATION** privilege on the metastore and the storage credential. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--external-location-name` - The name of an existing external location to validate. - * `--read-only` - Whether the storage credential is only usable for read operations. - * `--url` - The external location url to validate. - -## `databricks account storage-credentials` - These commands manage storage credentials for a particular metastore. - -These commands manage storage credentials for a particular metastore. - -### `databricks account storage-credentials create` - Create a storage credential. - -Creates a new storage credential. The request object is specific to the cloud: - - * **AwsIamRole** for AWS credentials - * **AzureServicePrincipal** for Azure credentials - * **GcpServiceAcountKey** for GCP credentials. - -The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body. - * `--comment` - Comment associated with the credential. - * `--read-only` - Whether the storage credential is only usable for read operations. - * `--skip-validation` - Supplying true to this argument skips validation of the created credential. - -### `databricks account storage-credentials get` - Gets the named storage credential. - -Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have a level of privilege on the storage credential. - -### `databricks account storage-credentials list` - Get all storage credentials assigned to a metastore. - -Gets a list of all storage credentials that have been assigned to given metastore. - -## `databricks table-constraints` - Primary key and foreign key constraints encode relationships between fields in tables. - -Primary and foreign keys are informational only and are not enforced. Foreign keys must reference a primary key in another table. -This primary key is the parent constraint of the foreign key and the table this primary key is on is the parent table of the foreign key. -Similarly, the foreign key is the child constraint of its referenced primary key; the table of the foreign key is the child table of the primary key. - -You can declare primary keys and foreign keys as part of the table specification during table creation. -You can also add or drop constraints on existing tables. - -### `databricks table-constraints create` - Create a table constraint. 
- - -For the table constraint creation to succeed, the user must satisfy both of these conditions: -- the user must have the **USE_CATALOG** privilege on the table's parent catalog, - the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table. -- if the new constraint is a __ForeignKeyConstraint__, - the user must have the **USE_CATALOG** privilege on the referenced parent table's catalog, - the **USE_SCHEMA** privilege on the referenced parent table's schema, - and be the owner of the referenced parent table. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks table-constraints delete` - Delete a table constraint. - -Deletes a table constraint. - -For the table constraint deletion to succeed, the user must satisfy both of these conditions: -- the user must have the **USE_CATALOG** privilege on the table's parent catalog, - the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table. -- if __cascade__ argument is **true**, the user must have the following permissions on all of the child tables: - the **USE_CATALOG** privilege on the table's catalog, - the **USE_SCHEMA** privilege on the table's schema, - and be the owner of the table. - -## `databricks tables` - A table resides in the third layer of Unity Catalog’s three-level namespace. - -A table resides in the third layer of Unity Catalog’s three-level namespace. It contains -rows of data. To create a table, users must have CREATE_TABLE and USE_SCHEMA permissions on the schema, -and they must have the USE_CATALOG permission on its parent catalog. To query a table, users must -have the SELECT permission on the table, and they must have the USE_CATALOG permission on its -parent catalog and the USE_SCHEMA permission on its parent schema. - -A table can be managed or external. From an API perspective, a __VIEW__ is a particular kind of table (rather than a managed or external table). - -### `databricks tables delete` - Delete a table. - -Deletes a table from the specified parent catalog and schema. -The caller must be the owner of the parent catalog, have the **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, -or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - -### `databricks tables get` - Get a table. - -Gets a table from the metastore for a specific catalog and schema. -The caller must be a metastore admin, be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, -or be the owner of the table and have the **SELECT** privilege on it as well. - -Flags: - * `--include-delta-metadata` - Whether delta metadata should be included in the response. - -### `databricks tables list` - List tables. - -Gets an array of all tables for the current metastore under the parent catalog and schema. -The caller must be a metastore admin or an owner of (or have the **SELECT** privilege on) the table. -For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. -There is no guarantee of a specific ordering of the elements in the array. - -Flags: - * `--include-delta-metadata` - Whether delta metadata should be included in the response. - * `--max-results` - Maximum number of tables to return (page length). 
- * `--page-token` - Opaque token to send for the next page of results (pagination).
-
-### `databricks tables list-summaries` - List table summaries.
-
-Gets an array of summaries for tables for a schema and catalog within the metastore. The table summaries returned are either:
-
-* summaries for all tables (within the current metastore and parent catalog and schema), when the user is a metastore admin, or:
-* summaries for all tables and schemas (within the current metastore and parent catalog)
-  for which the user has ownership or the **SELECT** privilege on the table and ownership or **USE_SCHEMA** privilege on the schema,
-  provided that the user also has ownership or the **USE_CATALOG** privilege on the parent catalog.
-
-There is no guarantee of a specific ordering of the elements in the array.
-
-Flags:
- * `--max-results` - Maximum number of tables to return (page length).
- * `--page-token` - Opaque token to send for the next page of results (pagination).
- * `--schema-name-pattern` - A SQL LIKE pattern (% and _) for schema names.
- * `--table-name-pattern` - A SQL LIKE pattern (% and _) for table names.
-
-## `databricks token-management` - Enables administrators to get all tokens and delete tokens for other users.
-
-Enables administrators to get all tokens and delete tokens for other users. Admins can
-either get every token, get a specific token by ID, or get all tokens for a particular user.
-
-### `databricks token-management create-obo-token` - Create on-behalf token.
-
-Creates a token on behalf of a service principal.
-
-Flags:
- * `--comment` - Comment that describes the purpose of the token.
-
-### `databricks token-management delete` - Delete a token.
-
-Deletes a token, specified by its ID.
-
-### `databricks token-management get` - Get token info.
-
-Gets information about a token, specified by its ID.
-
-### `databricks token-management list` - List all tokens.
-
-Lists all tokens associated with the specified workspace or user.
-
-Flags:
- * `--created-by-id` - User ID of the user that created the token.
- * `--created-by-username` - Username of the user that created the token.
-
-## `databricks tokens` - The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks APIs.
-
-The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks APIs.
-
-### `databricks tokens create` - Create a user token.
-
-Creates and returns a token for a user. If this call is made through token authentication, it creates
-a token with the same client ID as the authenticated token. If the user's token quota is exceeded, this call
-returns an error **QUOTA_EXCEEDED**.
-
-Flags:
- * `--comment` - Optional description to attach to the token.
- * `--lifetime-seconds` - The lifetime of the token, in seconds.
-
-### `databricks tokens delete` - Revoke token.
-
-Revokes an access token.
-
-If a token with the specified ID is not valid, this call returns an error **RESOURCE_DOES_NOT_EXIST**.
-
-### `databricks tokens list` - List tokens.
-
-Lists all the valid tokens for a user-workspace pair.
-
-## `databricks users` - Manage users on the workspace-level.
-
-Databricks recommends using SCIM provisioning to sync users and groups automatically from
-your identity provider to your Databricks Workspace. SCIM streamlines onboarding a new
-employee or team by using your identity provider to create users and groups in Databricks Workspace
-and give them the proper level of access. 
When a user leaves your organization or no longer
-needs access to Databricks Workspace, admins can terminate the user in your identity provider and that
-user’s account will also be removed from Databricks Workspace. This ensures a consistent offboarding
-process and prevents unauthorized users from accessing sensitive data.
-
-### `databricks users create` - Create a new user.
-
-Creates a new user in the Databricks Workspace. This new user will also be added to the Databricks account.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--active` - If this user is active.
- * `--display-name` - String that represents a concatenation of given and family names.
- * `--external-id` -
- * `--id` - Databricks user ID.
- * `--user-name` - Email address of the Databricks user.
-
-### `databricks users delete` - Delete a user.
-
-Deletes a user. Deleting a user from a Databricks Workspace also removes objects associated with the user.
-
-### `databricks users get` - Get user details.
-
-Gets information for a specific user in Databricks Workspace.
-
-### `databricks users list` - List users.
-
-Gets details for all the users associated with a Databricks Workspace.
-
-Flags:
- * `--attributes` - Comma-separated list of attributes to return in response.
- * `--count` - Desired number of results per page.
- * `--excluded-attributes` - Comma-separated list of attributes to exclude in response.
- * `--filter` - Query by which the results have to be filtered.
- * `--sort-by` - Attribute to sort the results.
- * `--sort-order` - The order to sort the results.
- * `--start-index` - Specifies the index of the first result.
-
-### `databricks users patch` - Update user details.
-
-Partially updates a user resource by applying the supplied operations on specific user attributes.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-### `databricks users update` - Replace a user.
-
-Replaces a user's information with the data supplied in request.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--active` - If this user is active.
- * `--display-name` - String that represents a concatenation of given and family names.
- * `--external-id` -
- * `--id` - Databricks user ID.
- * `--user-name` - Email address of the Databricks user.
-
-## `databricks account users` - Manage users on the account-level.
-
-Databricks recommends using SCIM provisioning to sync users and groups automatically from
-your identity provider to your Databricks Account. SCIM streamlines onboarding a new
-employee or team by using your identity provider to create users and groups in Databricks Account
-and give them the proper level of access. When a user leaves your organization or no longer
-needs access to Databricks Account, admins can terminate the user in your identity provider and that
-user’s account will also be removed from Databricks Account. This ensures a consistent offboarding
-process and prevents unauthorized users from accessing sensitive data.
-
-### `databricks account users create` - Create a new user.
-
-Creates a new user in the Databricks Account. This new user will also be added to the Databricks account.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--active` - If this user is active.
- * `--display-name` - String that represents a concatenation of given and family names.
- * `--external-id` -
- * `--id` - Databricks user ID. 
- * `--user-name` - Email address of the Databricks user. - -### `databricks account users delete` - Delete a user. - -Deleting a user from a Databricks Account also removes objects associated with the user. - -### `databricks account users get` - Get user details. - -Gets information for a specific user in Databricks Account. - -### `databricks account users list` - List users. - -Gets details for all the users associated with a Databricks Account. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks account users patch` - Update user details. - -Partially updates a user resource by applying the supplied operations on specific user attributes. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account users update` - Replace a user. - -Replaces a user's information with the data supplied in request. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks user ID. - * `--user-name` - Email address of the Databricks user. - -## `databricks account vpc-endpoints` - Manage VPC endpoints. - -These commands manage VPC endpoint configurations for this account. - -### `databricks account vpc-endpoints create` - Create VPC endpoint configuration. - -Creates a VPC endpoint configuration, which represents a -[VPC endpoint](https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html) -object in AWS used to communicate privately with Databricks over -[AWS PrivateLink](https://aws.amazon.com/privatelink). - -After you create the VPC endpoint configuration, the Databricks -[endpoint service](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html) -automatically accepts the VPC endpoint. - -Before configuring PrivateLink, read the -[Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body * `--aws-vpc-endpoint-id` - The ID of the VPC endpoint object in AWS. - * `--region` - The AWS region in which this VPC endpoint object exists. - -### `databricks account vpc-endpoints delete` - Delete VPC endpoint configuration. - -Deletes a VPC endpoint configuration, which represents an -[AWS VPC endpoint](https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html) that -can communicate privately with Databricks over [AWS PrivateLink](https://aws.amazon.com/privatelink). - -Before configuring PrivateLink, read the [Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -### `databricks account vpc-endpoints get` - Get a VPC endpoint configuration. 
-
-Gets a VPC endpoint configuration, which represents a [VPC endpoint](https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html) object in AWS used to communicate privately with Databricks over
-[AWS PrivateLink](https://aws.amazon.com/privatelink).
-
-### `databricks account vpc-endpoints list` - Get all VPC endpoint configurations.
-
-Gets a list of all VPC endpoints for an account, specified by ID.
-
-Before configuring PrivateLink, read the [Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html).
-
-## `databricks warehouses` - Manage Databricks SQL warehouses.
-
-A SQL warehouse is a compute resource that lets you run SQL commands on data objects within
-Databricks SQL. Compute resources are infrastructure resources that provide processing
-capabilities in the cloud.
-
-### `databricks warehouses create` - Create a warehouse.
-
-Creates a new SQL warehouse.
-
-Flags:
- * `--no-wait` - do not wait to reach RUNNING state.
- * `--timeout` - maximum amount of time to reach RUNNING state.
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--auto-stop-mins` - The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.
- * `--cluster-size` - Size of the clusters allocated for this warehouse.
- * `--creator-name` - warehouse creator name.
- * `--enable-photon` - Configures whether the warehouse should use Photon optimized clusters.
- * `--enable-serverless-compute` - Configures whether the warehouse should use serverless compute.
- * `--instance-profile-arn` - Deprecated.
- * `--max-num-clusters` - Maximum number of clusters that the autoscaler will create to handle concurrent queries.
- * `--min-num-clusters` - Minimum number of available clusters that will be maintained for this SQL warehouse.
- * `--name` - Logical name for the cluster.
- * `--spot-instance-policy` - Configures whether the warehouse should use spot instances.
- * `--warehouse-type` - Warehouse type: `PRO` or `CLASSIC`.
-
-### `databricks warehouses delete` - Delete a warehouse.
-
-Deletes a SQL warehouse.
-
-Flags:
- * `--no-wait` - do not wait to reach DELETED state.
- * `--timeout` - maximum amount of time to reach DELETED state.
-
-### `databricks warehouses edit` - Update a warehouse.
-
-Updates the configuration for a SQL warehouse.
-
-Flags:
- * `--no-wait` - do not wait to reach RUNNING state.
- * `--timeout` - maximum amount of time to reach RUNNING state.
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--auto-stop-mins` - The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.
- * `--cluster-size` - Size of the clusters allocated for this warehouse.
- * `--creator-name` - warehouse creator name.
- * `--enable-photon` - Configures whether the warehouse should use Photon optimized clusters.
- * `--enable-serverless-compute` - Configures whether the warehouse should use serverless compute.
- * `--instance-profile-arn` - Deprecated.
- * `--max-num-clusters` - Maximum number of clusters that the autoscaler will create to handle concurrent queries.
- * `--min-num-clusters` - Minimum number of available clusters that will be maintained for this SQL warehouse.
- * `--name` - Logical name for the cluster.
- * `--spot-instance-policy` - Configures whether the warehouse should use spot instances. 
- * `--warehouse-type` - Warehouse type: `PRO` or `CLASSIC`. - -### `databricks warehouses get` - Get warehouse info. - -Gets the information for a single SQL warehouse. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks warehouses get-workspace-warehouse-config` - Get the workspace configuration. - -Gets the workspace level configuration that is shared by all SQL warehouses in a workspace. - -### `databricks warehouses list` - List warehouses. - -Lists all SQL warehouses that a user has manager permissions on. - -Flags: - * `--run-as-user-id` - Service Principal which will be used to fetch the list of warehouses. - -### `databricks warehouses set-workspace-warehouse-config` - Set the workspace configuration. - -Sets the workspace level configuration that is shared by all SQL warehouses in a workspace. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--google-service-account` - GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage. - * `--instance-profile-arn` - AWS Only: Instance profile used to pass IAM role to the cluster. - * `--security-policy` - Security policy for warehouses. - * `--serverless-agreement` - Internal. - -### `databricks warehouses start` - Start a warehouse. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks warehouses stop` - Stop a warehouse. - -Flags: - * `--no-wait` - do not wait to reach STOPPED state. - * `--timeout` - maximum amount of time to reach STOPPED state. - -## `databricks workspace` - The Workspace API allows you to list, import, export, and delete notebooks and folders. - -A notebook is a web-based interface to a document that contains runnable code, visualizations, and explanatory text. - -### `databricks workspace delete` - Delete a workspace object. - -Delete a workspace object. - -Deletes an object or a directory (and optionally recursively deletes all objects in the directory). -* If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. -* If `path` is a non-empty directory and `recursive` is set to `false`, this call returns an error `DIRECTORY_NOT_EMPTY`. - -Object deletion cannot be undone and deleting a directory recursively is not atomic. - -Flags: - * `--recursive` - The flag that specifies whether to delete the object recursively. - -### `databricks workspace export` - Export a workspace object. - -Exports an object or the contents of an entire directory. - -If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. - -One can only export a directory in `DBC` format. If the exported data would exceed size limit, this call returns `MAX_NOTEBOOK_SIZE_EXCEEDED`. Currently, command does not support exporting a library. - -Flags: - * `--direct-download` - Flag to enable direct download. - * `--format` - This specifies the format of the exported file. - -### `databricks workspace get-status` - Get status. - -Gets the status of an object or a directory. -If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. - -### `databricks workspace import` - Import a workspace object. - -Imports a workspace object (for example, a notebook or file) or the contents of an entire directory. -If `path` already exists and `overwrite` is set to `false`, this call returns an error `RESOURCE_ALREADY_EXISTS`. 
-One can only use `DBC` format to import a directory.
-
-Flags:
- * `--content` - The base64-encoded content.
- * `--format` - This specifies the format of the file to be imported.
- * `--language` - The language of the object.
- * `--overwrite` - The flag that specifies whether to overwrite existing object.
-
-### `databricks workspace list` - List contents.
-
-Lists the contents of a directory, or the object if it is not a directory. If
-the input path does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`.
-
-Flags:
- * `--notebooks-modified-after` - ...
-
-### `databricks workspace mkdirs` - Create a directory.
-
-Creates the specified directory (and necessary parent directories if they do not exist).
-If there is an object (not a directory) at any prefix of the input path, this call returns
-an error `RESOURCE_ALREADY_EXISTS`.
-
-Note that if this operation fails it may have succeeded in creating some of the necessary parent directories.
-
-## `databricks account workspace-assignment` - The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account.
-
-The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account.
-
-### `databricks account workspace-assignment delete` - Delete permissions assignment.
-
-Deletes the workspace permissions assignment in a given account and workspace for the specified principal.
-
-### `databricks account workspace-assignment get` - List workspace permissions.
-
-Get an array of workspace permissions for the specified account and workspace.
-
-### `databricks account workspace-assignment list` - Get permission assignments.
-
-Get the permission assignments for the specified Databricks Account and Databricks Workspace.
-
-### `databricks account workspace-assignment update` - Create or update permissions assignment.
-
-Creates or updates the workspace permissions assignment in a given account and workspace for the specified principal.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-## `databricks workspace-conf` - This command allows updating known workspace settings for advanced users.
-
-This command allows updating known workspace settings for advanced users.
-
-### `databricks workspace-conf get-status` - Check configuration status.
-
-Gets the configuration status for a workspace.
-
-### `databricks workspace-conf set-status` - Enable/disable features.
-
-Sets the configuration status for a workspace, including enabling or disabling it.
-
-## `databricks account workspaces` - These commands manage workspaces for this account.
-
-These commands manage workspaces for this account. A Databricks workspace is an environment for
-accessing all of your Databricks assets. The workspace organizes objects (notebooks,
-libraries, and experiments) into folders, and provides access to data and computational
-resources such as clusters and jobs.
-
-These endpoints are available if your account is on the E2 version of the platform or on
-a select custom plan that allows multiple workspaces per account.
-
-### `databricks account workspaces create` - Create a new workspace.
-
-Creates a new workspace.
-
-**Important**: This operation is asynchronous. A response with HTTP status code 200 means
-the request has been accepted and is in progress, but does not mean that the workspace
-deployed successfully and is running. The initial workspace status is typically
-`PROVISIONING`. 
Use the workspace ID (`workspace_id`) field in the response to identify -the new workspace and make repeated `GET` requests with the workspace ID and check -its status. The workspace becomes available when the status changes to `RUNNING`. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--aws-region` - The AWS region of the workspace's data plane. - * `--cloud` - The cloud provider which the workspace uses. - * `--credentials-id` - ID of the workspace's credential configuration object. - * `--deployment-name` - The deployment name defines part of the subdomain for the workspace. - * `--location` - The Google Cloud region of the workspace data plane in your Google account. - * `--managed-services-customer-managed-key-id` - The ID of the workspace's managed services encryption key configuration object. - * `--network-id` - - * `--pricing-tier` - The pricing tier of the workspace. - * `--private-access-settings-id` - ID of the workspace's private access settings object. - * `--storage-configuration-id` - The ID of the workspace's storage configuration object. - * `--storage-customer-managed-key-id` - The ID of the workspace's storage encryption key configuration object. - -### `databricks account workspaces delete` - Delete a workspace. - -Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. However, it might take a few minutes for all workspaces resources to be deleted, depending on the size and number of workspace resources. - -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -### `databricks account workspaces get` - Get a workspace. - -Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`. - -For information about how to create a new workspace with command **including error handling**, see [Create a new workspace using the Account API](http://docs.databricks.com/administration-guide/account-api/new-workspace.html). - -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -### `databricks account workspaces list` - Get all workspaces. - -Gets a list of all workspaces associated with an account, specified by ID. - -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -### `databricks account workspaces update` - Update workspace configuration. - -Updates a workspace configuration for either a running workspace or a failed workspace. The elements that can be updated varies between these two use cases. - -Update a failed workspace: -You can update a Databricks workspace configuration for failed workspace deployment for some fields, but not all fields. For a failed workspace, this request supports updates to the following fields only: -- Credential configuration ID -- Storage configuration ID -- Network configuration ID. 
Used only to add or change a network configuration for a customer-managed VPC. For a failed workspace only, you can convert a workspace with Databricks-managed VPC to use a customer-managed VPC by adding this ID. You cannot downgrade a workspace with a customer-managed VPC to be a Databricks-managed VPC. You can update the network configuration for a failed or running workspace to add PrivateLink support, though you must also add a private access settings object. -- Key configuration ID for managed services (control plane storage, such as notebook source and Databricks SQL queries). Used only if you use customer-managed keys for managed services. -- Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if you use customer-managed keys for workspace storage. **Important**: If the workspace was ever in the running state, even if briefly before becoming a failed workspace, you cannot add a new key configuration ID for workspace storage. -- Private access settings ID to add PrivateLink support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace. - -After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` requests with the workspace ID and check the workspace status. The workspace is successful if the status changes to `RUNNING`. - -For information about how to create a new workspace with command **including error handling**, see [Create a new workspace using the Account API](http://docs.databricks.com/administration-guide/account-api/new-workspace.html). - -Update a running workspace: -You can update a Databricks workspace configuration for running workspaces for some fields, but not all fields. For a running workspace, this request supports updating the following fields only: -- Credential configuration ID - -- Network configuration ID. Used only if you already use a customer-managed VPC. You cannot convert a running workspace from a Databricks-managed VPC to a customer-managed VPC. You can use a network configuration update in command for a failed or running workspace to add support for PrivateLink, although you also need to add a private access settings object. - -- Key configuration ID for managed services (control plane storage, such as notebook source and Databricks SQL queries). Databricks does not directly encrypt the data with the customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK) that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to encrypt your workspace's managed services persisted data. If the workspace does not already have a CMK for managed services, adding this ID enables managed services encryption for new or updated data. Existing managed services data that existed before adding the key remains not encrypted with the DEK until it is modified. If the workspace already has customer-managed keys for managed services, this request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK. -- Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not already have a customer-managed key configuration for workspace storage. -- Private access settings ID to add PrivateLink support. 
You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace. - -**Important**: To update a running workspace, your workspace must have no running compute resources that run in your workspace's VPC in the Classic data plane. For example, stop all all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If you do not terminate all cluster instances in the workspace before calling command, the request will fail. - -**Important**: Customer-managed keys and customer-managed VPCs are supported by only some deployment types and subscription types. If you have questions about availability, contact your Databricks representative. - -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - * `--aws-region` - The AWS region of the workspace's data plane (for example, `us-west-2`). - * `--credentials-id` - ID of the workspace's credential configuration object. - * `--managed-services-customer-managed-key-id` - The ID of the workspace's managed services encryption key configuration object. - * `--network-id` - The ID of the workspace's network configuration object. - * `--storage-configuration-id` - The ID of the workspace's storage configuration object. - * `--storage-customer-managed-key-id` - The ID of the key configuration object for workspace storage. From d99f2b808b03b9a3d2215e2ed60ee911ba0f1fea Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 23 Apr 2024 15:31:36 +0200 Subject: [PATCH 162/286] Remove `JSON.parse` call from homebrew-tap action (#1393) ## Changes `needs.goreleaser.outputs.artifacts` already contains valid JS object so no need to make it a string and try to parse --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e09b500fb..8643ac355 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -84,7 +84,7 @@ jobs: with: github-token: ${{ secrets.DECO_GITHUB_TOKEN }} script: | - let artifacts = JSON.parse('${{ needs.goreleaser.outputs.artifacts }}') + let artifacts = ${{ needs.goreleaser.outputs.artifacts }} artifacts = artifacts.filter(a => a.type == "Archive") artifacts = new Map( artifacts.map(a => [ From 60122f60358d1c60cae5e546382ec6eb9b675c7d Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Tue, 23 Apr 2024 21:36:25 +0200 Subject: [PATCH 163/286] Show a better error message for using wheel tasks with older DBR versions (#1373) ## Changes This is a minor improvement to the error about wheel tasks with older DBR versions, since we get questions about it every now and then. It also adds a pointer to the docs that were added since the original messages was committed. 
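
For anyone who runs into this error, here is a minimal sketch of how a bundle can opt into the wrapper when it must target older compute. It assumes the `experimental.python_wheel_wrapper` key referenced by the error message; everything else in the snippet is illustrative:

```yaml
# Illustrative databricks.yml fragment (not a complete bundle definition).
# Assumes the experimental `python_wheel_wrapper` flag mentioned in the
# error message; enable it only if your clusters run DBR older than 13.3.
bundle:
  name: my_wheel_bundle

experimental:
  # Wrap Python wheel tasks so they can run on older DBR versions.
  python_wheel_wrapper: true
```
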
--------- Co-authored-by: Pieter Noordhuis --- bundle/python/warning.go | 2 +- bundle/python/warning_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bundle/python/warning.go b/bundle/python/warning.go index 59c220a06..3da88b0d7 100644 --- a/bundle/python/warning.go +++ b/bundle/python/warning.go @@ -25,7 +25,7 @@ func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagn } if hasIncompatibleWheelTasks(ctx, b) { - return diag.Errorf("python wheel tasks with local libraries require compute with DBR 13.1+. Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") + return diag.Errorf("Python wheel tasks require compute with DBR 13.3+ to include local libraries. Please change your cluster configuration or use the experimental 'python_wheel_wrapper' setting. See https://docs.databricks.com/dev-tools/bundles/python-wheel.html for more information.") } return nil } diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index 990545ab4..dd6397f78 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -102,7 +102,7 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) { require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) diags := bundle.Apply(context.Background(), b, WrapperWarning()) - require.ErrorContains(t, diags.Error(), "python wheel tasks with local libraries require compute with DBR 13.1+.") + require.ErrorContains(t, diags.Error(), "require compute with DBR 13.3") } func TestIncompatibleWheelTasksWithExistingClusterId(t *testing.T) { From 1c02224902eec47e61d40215b0bf4816eab2153e Mon Sep 17 00:00:00 2001 From: Kartik Gupta <88345179+kartikgupta-db@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:18:13 +0200 Subject: [PATCH 164/286] Pass `DATABRICKS_CONFIG_FILE` env var to sdk config during `auth profiles` (#1394) ## Changes * Currently, we use `auth profiles` command with `DATABRICKS_CONFIG_FILE` env var set, the file pointed to by the env var is ONLY used for loading the profile names (ini file sections). It is not passed to go sdk config object. We also don't use env variable loader in the go sdk config object, so this env var is ignored by the config and only default file is read. * This PR explicitly sets the config file path in the go sdk config object. 
## Tests * integration tests in vscode --- cmd/auth/profiles.go | 9 +++++---- cmd/auth/profiles_test.go | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 5ebea4440..797eb3b5f 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -29,10 +29,11 @@ func (c *profileMetadata) IsEmpty() bool { return c.Host == "" && c.AccountID == "" } -func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { +func (c *profileMetadata) Load(ctx context.Context, configFilePath string, skipValidate bool) { cfg := &config.Config{ - Loaders: []config.Loader{config.ConfigFile}, - Profile: c.Name, + Loaders: []config.Loader{config.ConfigFile}, + ConfigFile: configFilePath, + Profile: c.Name, } _ = cfg.EnsureResolved() if cfg.IsAws() { @@ -117,7 +118,7 @@ func newProfilesCommand() *cobra.Command { go func() { ctx := cmd.Context() t := time.Now() - profile.Load(ctx, skipValidate) + profile.Load(ctx, iniFile.Path(), skipValidate) log.Debugf(ctx, "Profile %q took %s to load", profile.Name, time.Since(t)) wg.Done() }() diff --git a/cmd/auth/profiles_test.go b/cmd/auth/profiles_test.go index 8a667a6db..91ff4d049 100644 --- a/cmd/auth/profiles_test.go +++ b/cmd/auth/profiles_test.go @@ -36,7 +36,7 @@ func TestProfiles(t *testing.T) { // Load the profile profile := &profileMetadata{Name: "profile1"} - profile.Load(ctx, true) + profile.Load(ctx, configFile, true) // Check the profile assert.Equal(t, "profile1", profile.Name) From 4c71f8cac4337cadef5a4eabfe09a05bdf9366f1 Mon Sep 17 00:00:00 2001 From: Jim Idle Date: Wed, 24 Apr 2024 11:34:09 -0600 Subject: [PATCH 165/286] Ensure that python dependencies are installed during upgrade (#1390) ## Changes The installer.Upgrade() processing did not install Python dependencies. This resulted in errors such as: ``` ModuleNotFoundError: No module named 'databricks.labs.blueprint' ``` Any new dependencies are now installed during the upgrade process. Resolves: databrickslabs/ucx#1276 ## Tests The TestUpgraderWorksForReleases test now checks to see if the upgrade process resulted in the dependencies being installed. --------- Signed-off-by: Jim.Idle --- cmd/labs/project/installer.go | 4 ++++ cmd/labs/project/installer_test.go | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/cmd/labs/project/installer.go b/cmd/labs/project/installer.go index 235d29bc4..42c4a8496 100644 --- a/cmd/labs/project/installer.go +++ b/cmd/labs/project/installer.go @@ -136,6 +136,10 @@ func (i *installer) Upgrade(ctx context.Context) error { if err != nil { return fmt.Errorf("installer: %w", err) } + err = i.installPythonDependencies(ctx, ".") + if err != nil { + return fmt.Errorf("python dependencies: %w", err) + } return nil } diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 709e14f20..0e049b4c0 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -403,6 +403,12 @@ func TestUpgraderWorksForReleases(t *testing.T) { newHome := copyTestdata(t, "testdata/installed-in-home") ctx = env.WithUserHomeDir(ctx, newHome) + // Install stubs for the python calls we need to ensure were run in the + // upgrade process. 
+ ctx, stub := process.WithStub(ctx) + stub.WithStderrFor(`python[\S]+ -m pip install .`, "[mock pip install]") + stub.WithStdoutFor(`python[\S]+ install.py`, "setting up important infrastructure") + py, _ := python.DetectExecutable(ctx) py, _ = filepath.Abs(py) ctx = env.Set(ctx, "PYTHON_BIN", py) @@ -420,4 +426,17 @@ func TestUpgraderWorksForReleases(t *testing.T) { r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") r.RunAndExpectOutput("setting up important infrastructure") + + // Check if the stub was called with the 'python -m pip install' command + pi := false + for _, call := range stub.Commands() { + if strings.HasSuffix(call, "-m pip install .") { + pi = true + break + } + } + if !pi { + t.Logf(`Expected stub command 'python[\S]+ -m pip install .' not found`) + t.FailNow() + } } From 6fd581d173a9f28167457b24b356c02f00be7ba9 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 25 Apr 2024 16:50:45 +0530 Subject: [PATCH 166/286] Allow variable references in non-string fields in the JSON schema (#1398) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Tests Verified manually. Before: Screenshot 2024-04-24 at 7 18 44 PM After: Screenshot 2024-04-24 at 7 18 31 PM Screenshot 2024-04-24 at 7 16 54 PM Manually verified the schema diff is sane. Example: ``` < "type": "boolean", < "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." --- > "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again.", > "anyOf": [ > { > "type": "boolean" > }, > { > "type": "string", > "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" > } > ] ``` --- bundle/schema/schema.go | 17 ++ bundle/schema/schema_test.go | 490 +++++++++++++++++++++++++++++++---- libs/dyn/dynvar/ref.go | 4 +- 3 files changed, 457 insertions(+), 54 deletions(-) diff --git a/bundle/schema/schema.go b/bundle/schema/schema.go index b37f72d9b..ac0b4f2ec 100644 --- a/bundle/schema/schema.go +++ b/bundle/schema/schema.go @@ -6,6 +6,7 @@ import ( "reflect" "strings" + "github.com/databricks/cli/libs/dyn/dynvar" "github.com/databricks/cli/libs/jsonschema" ) @@ -167,6 +168,22 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*jsonschem } jsonSchema := &jsonschema.Schema{Type: rootJavascriptType} + // If the type is a non-string primitive, then we allow it to be a string + // provided it's a pure variable reference (ie only a single variable reference). 
+ if rootJavascriptType == jsonschema.BooleanType || rootJavascriptType == jsonschema.NumberType { + jsonSchema = &jsonschema.Schema{ + AnyOf: []*jsonschema.Schema{ + { + Type: rootJavascriptType, + }, + { + Type: jsonschema.StringType, + Pattern: dynvar.VariableRegex, + }, + }, + } + } + if docs != nil { jsonSchema.Description = docs.Description } diff --git a/bundle/schema/schema_test.go b/bundle/schema/schema_test.go index d44a2082a..ea4fd1020 100644 --- a/bundle/schema/schema_test.go +++ b/bundle/schema/schema_test.go @@ -14,7 +14,15 @@ func TestIntSchema(t *testing.T) { expected := `{ - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }` schema, err := New(reflect.TypeOf(elemInt), nil) @@ -33,7 +41,15 @@ func TestBooleanSchema(t *testing.T) { expected := `{ - "type": "boolean" + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }` schema, err := New(reflect.TypeOf(elem), nil) @@ -101,46 +117,150 @@ func TestStructOfPrimitivesSchema(t *testing.T) { "type": "object", "properties": { "bool_val": { - "type": "boolean" + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "float32_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "float64_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "int16_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "int32_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "int64_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "int8_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "int_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "string_val": { "type": "string" }, "uint16_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "uint32_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "uint64_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "uint8_val": { - "type": "number" + "anyOf": [ + { + 
"type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "uint_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -200,7 +320,15 @@ func TestStructOfStructsSchema(t *testing.T) { "type": "object", "properties": { "a": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "b": { "type": "string" @@ -257,7 +385,15 @@ func TestStructOfMapsSchema(t *testing.T) { "my_map": { "type": "object", "additionalProperties": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } } }, @@ -339,7 +475,15 @@ func TestMapOfPrimitivesSchema(t *testing.T) { `{ "type": "object", "additionalProperties": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }` @@ -368,7 +512,15 @@ func TestMapOfStructSchema(t *testing.T) { "type": "object", "properties": { "my_int": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -398,7 +550,15 @@ func TestMapOfMapSchema(t *testing.T) { "additionalProperties": { "type": "object", "additionalProperties": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } } }` @@ -495,7 +655,15 @@ func TestSliceOfMapSchema(t *testing.T) { "items": { "type": "object", "additionalProperties": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } } }` @@ -525,7 +693,15 @@ func TestSliceOfStructSchema(t *testing.T) { "type": "object", "properties": { "my_int": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -575,7 +751,15 @@ func TestEmbeddedStructSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "country": { "type": "string" @@ -607,7 +791,15 @@ func TestEmbeddedStructSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "home": { "type": "object", @@ -694,7 +886,15 @@ func TestNonAnnotatedFieldsAreSkipped(t *testing.T) { "type": "object", "properties": { "bar": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] 
} }, "additionalProperties": false, @@ -728,7 +928,15 @@ func TestDashFieldsAreSkipped(t *testing.T) { "type": "object", "properties": { "bar": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -773,7 +981,15 @@ func TestPointerInStructSchema(t *testing.T) { "type": "object", "properties": { "ptr_val2": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -782,13 +998,29 @@ func TestPointerInStructSchema(t *testing.T) { ] }, "float_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "ptr_bar": { "type": "object", "properties": { "ptr_val2": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -797,7 +1029,15 @@ func TestPointerInStructSchema(t *testing.T) { ] }, "ptr_int": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "ptr_string": { "type": "string" @@ -860,7 +1100,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "name": { "type": "string" @@ -875,7 +1123,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "name": { "type": "string" @@ -895,7 +1151,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "name": { "type": "string" @@ -910,7 +1174,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "name": { "type": "string" @@ -932,7 +1204,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "name": { "type": "string" @@ -950,7 +1230,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "name": { "type": "string" @@ -1028,16 +1316,40 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { "type": 
"object", "properties": { "apple": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "bar": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "papaya": { "type": "object", "properties": { "a": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "b": { "type": "string" @@ -1111,7 +1423,15 @@ func TestDocIngestionForObject(t *testing.T) { "description": "docs for a" }, "b": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -1185,12 +1505,28 @@ func TestDocIngestionForSlice(t *testing.T) { "type": "object", "properties": { "guava": { - "type": "number", - "description": "docs for guava" + "description": "docs for guava", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "pineapple": { - "type": "number", - "description": "docs for pineapple" + "description": "docs for pineapple", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -1268,12 +1604,28 @@ func TestDocIngestionForMap(t *testing.T) { "type": "object", "properties": { "apple": { - "type": "number", - "description": "docs for apple" + "description": "docs for apple", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "mango": { - "type": "number", - "description": "docs for mango" + "description": "docs for mango", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -1324,8 +1676,16 @@ func TestDocIngestionForTopLevelPrimitive(t *testing.T) { "description": "docs for root", "properties": { "my_val": { - "type": "number", - "description": "docs for my val" + "description": "docs for my val", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] } }, "additionalProperties": false, @@ -1395,7 +1755,15 @@ func TestInterfaceGeneratesEmptySchema(t *testing.T) { "type": "object", "properties": { "apple": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "mango": {} }, @@ -1436,7 +1804,15 @@ func TestBundleReadOnlytag(t *testing.T) { "type": "object", "properties": { "apple": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "pokemon": { "type": "object", @@ -1488,7 +1864,15 @@ func TestBundleInternalTag(t *testing.T) { "type": "object", "properties": { "apple": { 
- "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + } + ] }, "pokemon": { "type": "object", diff --git a/libs/dyn/dynvar/ref.go b/libs/dyn/dynvar/ref.go index a2047032a..e6340269f 100644 --- a/libs/dyn/dynvar/ref.go +++ b/libs/dyn/dynvar/ref.go @@ -6,7 +6,9 @@ import ( "github.com/databricks/cli/libs/dyn" ) -var re = regexp.MustCompile(`\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}`) +const VariableRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}` + +var re = regexp.MustCompile(VariableRegex) // ref represents a variable reference. // It is a string [dyn.Value] contained in a larger [dyn.Value]. From e6523331037c93133828e51275b56eb71a67c10e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 25 Apr 2024 16:51:10 +0530 Subject: [PATCH 167/286] Fix variable overrides in targets for non-string variables (#1397) Before variable overrides that were not string in a target would not work. This PR fixes that. Tested manually and via a unit test. --- bundle/config/root.go | 18 ++++--- .../databricks.yml | 41 ++++++++++++++++ bundle/tests/variables_test.go | 49 +++++++++++++++++++ 3 files changed, 101 insertions(+), 7 deletions(-) create mode 100644 bundle/tests/variables/variable_overrides_in_target/databricks.yml diff --git a/bundle/config/root.go b/bundle/config/root.go index 17f2747ef..fda3759dd 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -408,15 +408,19 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { // For each variable, normalize its contents if it is a single string. return dyn.Map(target, "variables", dyn.Foreach(func(_ dyn.Path, variable dyn.Value) (dyn.Value, error) { - if variable.Kind() != dyn.KindString { + switch variable.Kind() { + + case dyn.KindString, dyn.KindBool, dyn.KindFloat, dyn.KindInt: + // Rewrite the variable to a map with a single key called "default". + // This conforms to the variable type. Normalization back to the typed + // configuration will convert this to a string if necessary. + return dyn.NewValue(map[string]dyn.Value{ + "default": variable, + }, variable.Location()), nil + + default: return variable, nil } - - // Rewrite the variable to a map with a single key called "default". - // This conforms to the variable type. 
- return dyn.NewValue(map[string]dyn.Value{ - "default": variable, - }, variable.Location()), nil })) })) } diff --git a/bundle/tests/variables/variable_overrides_in_target/databricks.yml b/bundle/tests/variables/variable_overrides_in_target/databricks.yml new file mode 100644 index 000000000..4e52b5073 --- /dev/null +++ b/bundle/tests/variables/variable_overrides_in_target/databricks.yml @@ -0,0 +1,41 @@ +bundle: + name: foobar + +resources: + pipelines: + my_pipeline: + name: ${var.foo} + continuous: ${var.baz} + clusters: + - num_workers: ${var.bar} + + + +variables: + foo: + default: "a_string" + description: "A string variable" + + bar: + default: 42 + description: "An integer variable" + + baz: + default: true + description: "A boolean variable" + +targets: + use-default-variable-values: + + override-string-variable: + variables: + foo: "overridden_string" + + override-int-variable: + variables: + bar: 43 + + override-both-bool-and-string-variables: + variables: + foo: "overridden_string" + baz: false diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index fde36344f..f51802684 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -120,3 +120,52 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) { assert.Equal(t, "cluster: some-test-cluster", b.Config.Variables["d"].Lookup.String()) assert.Equal(t, "instance-pool: some-test-instance-pool", b.Config.Variables["e"].Lookup.String()) } + +func TestVariableTargetOverrides(t *testing.T) { + var tcases = []struct { + targetName string + pipelineName string + pipelineContinuous bool + pipelineNumWorkers int + }{ + { + "use-default-variable-values", + "a_string", + true, + 42, + }, + { + "override-string-variable", + "overridden_string", + true, + 42, + }, + { + "override-int-variable", + "a_string", + true, + 43, + }, + { + "override-both-bool-and-string-variables", + "overridden_string", + false, + 42, + }, + } + + for _, tcase := range tcases { + t.Run(tcase.targetName, func(t *testing.T) { + b := loadTarget(t, "./variables/variable_overrides_in_target", tcase.targetName) + diags := bundle.Apply(context.Background(), b, bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferences("variables")), + ) + require.NoError(t, diags.Error()) + + assert.Equal(t, tcase.pipelineName, b.Config.Resources.Pipelines["my_pipeline"].Name) + assert.Equal(t, tcase.pipelineContinuous, b.Config.Resources.Pipelines["my_pipeline"].Continuous) + assert.Equal(t, tcase.pipelineNumWorkers, b.Config.Resources.Pipelines["my_pipeline"].Clusters[0].NumWorkers) + }) + } +} From d949f2b4f2a3cd59cd57cdefef25fefbbb29af1d Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 25 Apr 2024 16:53:50 +0530 Subject: [PATCH 168/286] Fix bundle schema for variables (#1396) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR fixes the variable schema to: 1. Allow non-string values in the "default" value of a variable. 2. Allow non-string overrides in a target for a variable. ## Tests Manually. There are no longer squiggly lines. 
Before: Screenshot 2024-04-24 at 3 26 43 PM After: Screenshot 2024-04-24 at 3 26 10 PM --- bundle/config/mutator/set_variables.go | 2 - cmd/bundle/schema.go | 56 ++++++++++++++++ libs/jsonschema/schema.go | 38 ++++++++++- libs/jsonschema/schema_test.go | 90 ++++++++++++++++++++++++++ 4 files changed, 183 insertions(+), 3 deletions(-) diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index bb88379e0..eae1fe2ab 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -53,8 +53,6 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Di } // We should have had a value to set for the variable at this point. - // TODO: use cmdio to request values for unassigned variables if current - // terminal is a tty. Tracked in https://github.com/databricks/cli/issues/379 return diag.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) } diff --git a/cmd/bundle/schema.go b/cmd/bundle/schema.go index 0f27142bd..b0d6b3dd5 100644 --- a/cmd/bundle/schema.go +++ b/cmd/bundle/schema.go @@ -7,9 +7,58 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/schema" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/jsonschema" "github.com/spf13/cobra" ) +func overrideVariables(s *jsonschema.Schema) error { + // Override schema for default values to allow for multiple primitive types. + // These are normalized to strings when converted to the typed representation. + err := s.SetByPath("variables.*.default", jsonschema.Schema{ + AnyOf: []*jsonschema.Schema{ + { + Type: jsonschema.StringType, + }, + { + Type: jsonschema.BooleanType, + }, + { + Type: jsonschema.NumberType, + }, + { + Type: jsonschema.IntegerType, + }, + }, + }) + if err != nil { + return err + } + + // Override schema for variables in targets to allow just specifying the value + // along side overriding the variable definition if needed. + ns, err := s.GetByPath("variables.*") + if err != nil { + return err + } + return s.SetByPath("targets.*.variables.*", jsonschema.Schema{ + AnyOf: []*jsonschema.Schema{ + { + Type: jsonschema.StringType, + }, + { + Type: jsonschema.BooleanType, + }, + { + Type: jsonschema.NumberType, + }, + { + Type: jsonschema.IntegerType, + }, + &ns, + }, + }) +} + func newSchemaCommand() *cobra.Command { cmd := &cobra.Command{ Use: "schema", @@ -30,6 +79,13 @@ func newSchemaCommand() *cobra.Command { return err } + // Override schema for variables to take into account normalization of default + // variable values and variable overrides in a target. + err = overrideVariables(schema) + if err != nil { + return err + } + // Print the JSON schema to stdout. 
result, err := json.MarshalIndent(schema, "", " ") if err != nil { diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 967e2e9cd..f1e223ec7 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -6,6 +6,7 @@ import ( "os" "regexp" "slices" + "strings" "github.com/databricks/cli/internal/build" "golang.org/x/mod/semver" @@ -81,6 +82,41 @@ func (s *Schema) ParseString(v string) (any, error) { return fromString(v, s.Type) } +func (s *Schema) getByPath(path string) (*Schema, error) { + p := strings.Split(path, ".") + + res := s + for _, node := range p { + if node == "*" { + res = res.AdditionalProperties.(*Schema) + continue + } + var ok bool + res, ok = res.Properties[node] + if !ok { + return nil, fmt.Errorf("property %q not found in schema. Query path: %s", node, path) + } + } + return res, nil +} + +func (s *Schema) GetByPath(path string) (Schema, error) { + v, err := s.getByPath(path) + if err != nil { + return Schema{}, err + } + return *v, nil +} + +func (s *Schema) SetByPath(path string, v Schema) error { + dst, err := s.getByPath(path) + if err != nil { + return err + } + *dst = v + return nil +} + type Type string const ( @@ -97,7 +133,7 @@ const ( func (schema *Schema) validateSchemaPropertyTypes() error { for _, v := range schema.Properties { switch v.Type { - case NumberType, BooleanType, StringType, IntegerType: + case NumberType, BooleanType, StringType, IntegerType, ObjectType, ArrayType: continue case "int", "int32", "int64": return fmt.Errorf("type %s is not a recognized json schema type. Please use \"integer\" instead", v.Type) diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go index cf1f12767..c365cf235 100644 --- a/libs/jsonschema/schema_test.go +++ b/libs/jsonschema/schema_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSchemaValidateTypeNames(t *testing.T) { @@ -305,3 +306,92 @@ func TestValidateSchemaSkippedPropertiesHaveDefaults(t *testing.T) { err = s.validate() assert.NoError(t, err) } + +func testSchema() *Schema { + return &Schema{ + Type: "object", + Properties: map[string]*Schema{ + "int_val": { + Type: "integer", + Default: int64(123), + }, + "string_val": { + Type: "string", + }, + "object_val": { + Type: "object", + Properties: map[string]*Schema{ + "bar": { + Type: "string", + Default: "baz", + }, + }, + AdditionalProperties: &Schema{ + Type: "object", + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Default: "zab", + }, + }, + }, + }, + }, + } + +} + +func TestSchemaGetByPath(t *testing.T) { + s := testSchema() + + ss, err := s.GetByPath("int_val") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: IntegerType, + Default: int64(123), + }, ss) + + ss, err = s.GetByPath("string_val") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: StringType, + }, ss) + + ss, err = s.GetByPath("object_val.bar") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: StringType, + Default: "baz", + }, ss) + + ss, err = s.GetByPath("object_val.*.foo") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: StringType, + Default: "zab", + }, ss) +} + +func TestSchemaSetByPath(t *testing.T) { + s := testSchema() + + err := s.SetByPath("int_val", Schema{ + Type: IntegerType, + Default: int64(456), + }) + require.NoError(t, err) + assert.Equal(t, int64(456), s.Properties["int_val"].Default) + + err = s.SetByPath("object_val.*.foo", Schema{ + Type: StringType, + Default: "zooby", + }) + 
require.NoError(t, err) + + ns, err := s.GetByPath("object_val.*.foo") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: StringType, + Default: "zooby", + }, ns) +} From db84a707cd56ad0c04a14dfe21b940c8261154c1 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 25 Apr 2024 13:25:26 +0200 Subject: [PATCH 169/286] Fix bundle documentation URL (#1399) Closes #1395. --- cmd/bundle/bundle.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go index 1db60d585..0880c9c44 100644 --- a/cmd/bundle/bundle.go +++ b/cmd/bundle/bundle.go @@ -9,7 +9,7 @@ func New() *cobra.Command { cmd := &cobra.Command{ Use: "bundle", Short: "Databricks Asset Bundles let you express data/AI/analytics projects as code.", - Long: "Databricks Asset Bundles let you express data/AI/analytics projects as code.\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles", + Long: "Databricks Asset Bundles let you express data/AI/analytics projects as code.\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles/index.html", GroupID: "development", } From a292eefc2edb2ae6a0b53068bfa4f07fb93b1075 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 25 Apr 2024 15:19:23 +0200 Subject: [PATCH 170/286] Release v0.218.1 (#1401) This is a bugfix release. CLI: * Pass `DATABRICKS_CONFIG_FILE` for `auth profiles` ([#1394](https://github.com/databricks/cli/pull/1394)). Bundles: * Show a better error message for using wheel tasks with older DBR versions ([#1373](https://github.com/databricks/cli/pull/1373)). * Allow variable references in non-string fields in the JSON schema ([#1398](https://github.com/databricks/cli/pull/1398)). * Fix variable overrides in targets for non-string variables ([#1397](https://github.com/databricks/cli/pull/1397)). * Fix bundle schema for variables ([#1396](https://github.com/databricks/cli/pull/1396)). * Fix bundle documentation URL ([#1399](https://github.com/databricks/cli/pull/1399)). Internal: * Removed autogenerated docs for the CLI commands ([#1392](https://github.com/databricks/cli/pull/1392)). * Remove `JSON.parse` call from homebrew-tap action ([#1393](https://github.com/databricks/cli/pull/1393)). * Ensure that Python dependencies are installed during upgrade ([#1390](https://github.com/databricks/cli/pull/1390)). --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b74498ec..898f0df9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Version changelog +## 0.218.1 + +This is a bugfix release. + +CLI: + * Pass `DATABRICKS_CONFIG_FILE` for `auth profiles` ([#1394](https://github.com/databricks/cli/pull/1394)). + +Bundles: + * Show a better error message for using wheel tasks with older DBR versions ([#1373](https://github.com/databricks/cli/pull/1373)). + * Allow variable references in non-string fields in the JSON schema ([#1398](https://github.com/databricks/cli/pull/1398)). + * Fix variable overrides in targets for non-string variables ([#1397](https://github.com/databricks/cli/pull/1397)). + * Fix bundle schema for variables ([#1396](https://github.com/databricks/cli/pull/1396)). + * Fix bundle documentation URL ([#1399](https://github.com/databricks/cli/pull/1399)). + +Internal: + * Removed autogenerated docs for the CLI commands ([#1392](https://github.com/databricks/cli/pull/1392)). + * Remove `JSON.parse` call from homebrew-tap action ([#1393](https://github.com/databricks/cli/pull/1393)). 
+ * Ensure that Python dependencies are installed during upgrade ([#1390](https://github.com/databricks/cli/pull/1390)). + + + ## 0.218.0 This release marks the general availability of Databricks Asset Bundles. From 781688c9cb7699d9c0e1977d2f77334381c65640 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 16:41:24 +0200 Subject: [PATCH 171/286] Bump github.com/databricks/databricks-sdk-go from 0.38.0 to 0.39.0 (#1405) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.38.0 to 0.39.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.39.0

0.39.0

  • Ignored flaky integration tests (#894).
  • Added retries for "worker env WorkerEnvId(workerenv-XXXXX) not found" (#890).
  • Updated SDK to OpenAPI spec (#899).

Note: This release contains breaking changes, please see the API changes below for more details.

API Changes:

OpenAPI SHA: 21f9f1482f9d0d15228da59f2cd9f0863d2a6d55, Date: 2024-04-23

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .codegen/service.go.tmpl | 1 + bundle/schema/docs/bundle_descriptions.json | 205 ++++++++++++++++-- .../esm-enablement-account.go | 3 + .../automatic-cluster-update.go | 3 + .../csp-enablement/csp-enablement.go | 3 + .../esm-enablement/esm-enablement.go | 3 + cmd/workspace/jobs/jobs.go | 1 + cmd/workspace/libraries/libraries.go | 53 ++--- cmd/workspace/pipelines/pipelines.go | 2 + .../provider-exchanges/provider-exchanges.go | 22 +- .../serving-endpoints/serving-endpoints.go | 62 ++++++ go.mod | 2 +- go.sum | 4 +- 14 files changed, 296 insertions(+), 70 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 0aa4b1028..1f11c17bf 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -94684175b8bd65f8701f89729351f8069e8309c9 \ No newline at end of file +21f9f1482f9d0d15228da59f2cd9f0863d2a6d55 \ No newline at end of file diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 6aabb02c9..492b2132f 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -151,6 +151,7 @@ func new{{.PascalName}}() *cobra.Command { "provider-exchanges delete" "provider-exchanges delete-listing-from-exchange" "provider-exchanges list-exchanges-for-listing" + "provider-exchanges list-listings-for-exchange" -}} {{- $fullCommandName := (print $serviceName " " .KebabName) -}} {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }} diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index ca889ae52..75499507d 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -46,6 +46,17 @@ "properties": { "fail_on_active_runs": { "description": "" + }, + "lock": { + "description": "", + "properties": { + "enabled": { + "description": "" + }, + "force": { + "description": "" + } + } } } }, @@ -76,6 +87,9 @@ "additionalproperties": { "description": "" } + }, + "use_legacy_run_as": { + "description": "" } } }, @@ -242,7 +256,7 @@ "description": "", "properties": { "client": { - "description": "*\nUser-friendly name for the client version: “client”: “1”\nThe version is a string, consisting of the major client version" + "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version." }, "dependencies": { "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", @@ -909,10 +923,10 @@ } }, "egg": { - "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." 
+ "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "jar": { - "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "maven": { "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", @@ -942,8 +956,11 @@ } } }, + "requirements": { + "description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`" + }, "whl": { - "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." } } } @@ -1303,6 +1320,9 @@ }, "source": { "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + }, + "warehouse_id": { + "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail." } } }, @@ -1526,7 +1546,7 @@ } }, "file": { - "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. 
Multiple SQL statements separated by semicolons (;) are not permitted.", + "description": "If file, indicates that this job runs a SQL file in a remote Git repository.", "properties": { "path": { "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." @@ -1562,7 +1582,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -1679,7 +1699,7 @@ } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -2415,6 +2435,17 @@ "continuous": { "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." }, + "deployment": { + "description": "Deployment type of this pipeline.", + "properties": { + "kind": { + "description": "The deployment method that manages the pipeline." + }, + "metadata_file_path": { + "description": "The path to the file containing metadata about the deployment." + } + } + }, "development": { "description": "Whether the pipeline is in Development mode. Defaults to false." }, @@ -2441,6 +2472,65 @@ "id": { "description": "Unique identifier for this pipeline." }, + "ingestion_definition": { + "description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.", + "properties": { + "connection_name": { + "description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "ingestion_gateway_id": { + "description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "objects": { + "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", + "items": { + "description": "", + "properties": { + "schema": { + "description": "Select tables from a specific source schema.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store tables." + }, + "destination_schema": { + "description": "Required. Destination schema to store tables in. 
Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists." + }, + "source_catalog": { + "description": "The source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Required. Schema name in the source database." + } + } + }, + "table": { + "description": "Select tables from a specific source table.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store table." + }, + "destination_schema": { + "description": "Required. Destination schema to store table." + }, + "destination_table": { + "description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used." + }, + "source_catalog": { + "description": "Source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Schema name in the source database. Might be optional depending on the type of source." + }, + "source_table": { + "description": "Required. Table name in the source database." + } + } + } + } + } + } + } + }, "libraries": { "description": "Libraries or code needed by this deployment.", "items": { @@ -2682,6 +2772,17 @@ "properties": { "fail_on_active_runs": { "description": "" + }, + "lock": { + "description": "", + "properties": { + "enabled": { + "description": "" + }, + "force": { + "description": "" + } + } } } }, @@ -2878,7 +2979,7 @@ "description": "", "properties": { "client": { - "description": "*\nUser-friendly name for the client version: “client”: “1”\nThe version is a string, consisting of the major client version" + "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version." }, "dependencies": { "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", @@ -3545,10 +3646,10 @@ } }, "egg": { - "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "jar": { - "description": "URI of the jar to be installed. 
Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "maven": { "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", @@ -3578,8 +3679,11 @@ } } }, + "requirements": { + "description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`" + }, "whl": { - "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." } } } @@ -3939,6 +4043,9 @@ }, "source": { "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + }, + "warehouse_id": { + "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail." } } }, @@ -4162,7 +4269,7 @@ } }, "file": { - "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", + "description": "If file, indicates that this job runs a SQL file in a remote Git repository.", "properties": { "path": { "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." @@ -4198,7 +4305,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." 
}, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -4315,7 +4422,7 @@ } }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", + "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -5051,6 +5158,17 @@ "continuous": { "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." }, + "deployment": { + "description": "Deployment type of this pipeline.", + "properties": { + "kind": { + "description": "The deployment method that manages the pipeline." + }, + "metadata_file_path": { + "description": "The path to the file containing metadata about the deployment." + } + } + }, "development": { "description": "Whether the pipeline is in Development mode. Defaults to false." }, @@ -5077,6 +5195,65 @@ "id": { "description": "Unique identifier for this pipeline." }, + "ingestion_definition": { + "description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.", + "properties": { + "connection_name": { + "description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "ingestion_gateway_id": { + "description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "objects": { + "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", + "items": { + "description": "", + "properties": { + "schema": { + "description": "Select tables from a specific source schema.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store tables." + }, + "destination_schema": { + "description": "Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists." + }, + "source_catalog": { + "description": "The source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Required. Schema name in the source database." 
+ } + } + }, + "table": { + "description": "Select tables from a specific source table.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store table." + }, + "destination_schema": { + "description": "Required. Destination schema to store table." + }, + "destination_table": { + "description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used." + }, + "source_catalog": { + "description": "Source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Schema name in the source database. Might be optional depending on the type of source." + }, + "source_table": { + "description": "Required. Table name in the source database." + } + } + } + } + } + } + } + }, "libraries": { "description": "Libraries or code needed by this deployment.", "items": { diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go index dd407e2e5..a2e95ffe1 100755 --- a/cmd/account/esm-enablement-account/esm-enablement-account.go +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -25,6 +25,9 @@ func New() *cobra.Command { setting is disabled for new workspaces. After workspace creation, account admins can enable enhanced security monitoring individually for each workspace.`, + + // This service is being previewed; hide from help output. + Hidden: true, } // Add methods diff --git a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go index 2385195bb..681dba7b3 100755 --- a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go +++ b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go @@ -22,6 +22,9 @@ func New() *cobra.Command { Short: `Controls whether automatic cluster update is enabled for the current workspace.`, Long: `Controls whether automatic cluster update is enabled for the current workspace. By default, it is turned off.`, + + // This service is being previewed; hide from help output. + Hidden: true, } // Add methods diff --git a/cmd/workspace/csp-enablement/csp-enablement.go b/cmd/workspace/csp-enablement/csp-enablement.go index 312591564..e82fdc2a4 100755 --- a/cmd/workspace/csp-enablement/csp-enablement.go +++ b/cmd/workspace/csp-enablement/csp-enablement.go @@ -25,6 +25,9 @@ func New() *cobra.Command { off. This settings can NOT be disabled once it is enabled.`, + + // This service is being previewed; hide from help output. + Hidden: true, } // Add methods diff --git a/cmd/workspace/esm-enablement/esm-enablement.go b/cmd/workspace/esm-enablement/esm-enablement.go index a65fe2f76..784c01f21 100755 --- a/cmd/workspace/esm-enablement/esm-enablement.go +++ b/cmd/workspace/esm-enablement/esm-enablement.go @@ -27,6 +27,9 @@ func New() *cobra.Command { If the compliance security profile is disabled, you can enable or disable this setting and it is not permanent.`, + + // This service is being previewed; hide from help output. 
+ Hidden: true, } // Add methods diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 267dfc73b..e31c3f086 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -1513,6 +1513,7 @@ func newSubmit() *cobra.Command { // TODO: complex arg: pipeline_task // TODO: complex arg: python_wheel_task // TODO: complex arg: queue + // TODO: complex arg: run_as // TODO: complex arg: run_job_task cmd.Flags().StringVar(&submitReq.RunName, "run-name", submitReq.RunName, `An optional name for the run.`) // TODO: complex arg: spark_jar_task diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index e11e5a4c5..aed8843dc 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -25,18 +25,14 @@ func New() *cobra.Command { To make third-party or custom code available to notebooks and jobs running on your clusters, you can install a library. Libraries can be written in Python, - Java, Scala, and R. You can upload Java, Scala, and Python libraries and point - to external packages in PyPI, Maven, and CRAN repositories. + Java, Scala, and R. You can upload Python, Java, Scala and R libraries and + point to external packages in PyPI, Maven, and CRAN repositories. Cluster libraries can be used by all notebooks running on a cluster. You can install a cluster library directly from a public repository such as PyPI or Maven, using a previously installed workspace library, or using an init script. - When you install a library on a cluster, a notebook already attached to that - cluster will not immediately see the new library. You must first detach and - then reattach the notebook to the cluster. - When you uninstall a library from a cluster, the library is removed only when you restart the cluster. Until you restart the cluster, the status of the uninstalled library appears as Uninstall pending restart.`, @@ -75,9 +71,8 @@ func newAllClusterStatuses() *cobra.Command { cmd.Short = `Get all statuses.` cmd.Long = `Get all statuses. - Get the status of all libraries on all clusters. A status will be available - for all libraries installed on this cluster via the API or the libraries UI as - well as libraries set to be installed on all clusters via the libraries UI.` + Get the status of all libraries on all clusters. A status is returned for all + libraries installed on this cluster via the API or the libraries UI.` cmd.Annotations = make(map[string]string) @@ -110,13 +105,13 @@ func newAllClusterStatuses() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var clusterStatusOverrides []func( *cobra.Command, - *compute.ClusterStatusRequest, + *compute.ClusterStatus, ) func newClusterStatus() *cobra.Command { cmd := &cobra.Command{} - var clusterStatusReq compute.ClusterStatusRequest + var clusterStatusReq compute.ClusterStatus // TODO: short flags @@ -124,21 +119,13 @@ func newClusterStatus() *cobra.Command { cmd.Short = `Get status.` cmd.Long = `Get status. - Get the status of libraries on a cluster. A status will be available for all - libraries installed on this cluster via the API or the libraries UI as well as - libraries set to be installed on all clusters via the libraries UI. The order - of returned libraries will be as follows. - - 1. Libraries set to be installed on this cluster will be returned first. - Within this group, the final order will be order in which the libraries were - added to the cluster. - - 2. 
Libraries set to be installed on all clusters are returned next. Within - this group there is no order guarantee. - - 3. Libraries that were previously requested on this cluster or on all - clusters, but now marked for removal. Within this group there is no order - guarantee. + Get the status of libraries on a cluster. A status is returned for all + libraries installed on this cluster via the API or the libraries UI. The order + of returned libraries is as follows: 1. Libraries set to be installed on this + cluster, in the order that the libraries were added to the cluster, are + returned first. 2. Libraries that were previously requested to be installed on + this cluster or, but are now marked for removal, in no particular order, are + returned last. Arguments: CLUSTER_ID: Unique identifier of the cluster whose status should be retrieved.` @@ -195,12 +182,8 @@ func newInstall() *cobra.Command { cmd.Short = `Add a library.` cmd.Long = `Add a library. - Add libraries to be installed on a cluster. The installation is asynchronous; - it happens in the background after the completion of this request. - - **Note**: The actual set of libraries to be installed on a cluster is the - union of the libraries specified via this method and the libraries set to be - installed on all clusters via the libraries UI.` + Add libraries to install on a cluster. The installation is asynchronous; it + happens in the background after the completion of this request.` cmd.Annotations = make(map[string]string) @@ -259,9 +242,9 @@ func newUninstall() *cobra.Command { cmd.Short = `Uninstall libraries.` cmd.Long = `Uninstall libraries. - Set libraries to be uninstalled on a cluster. The libraries won't be - uninstalled until the cluster is restarted. Uninstalling libraries that are - not installed on the cluster will have no impact but is not an error.` + Set libraries to uninstall from a cluster. The libraries won't be uninstalled + until the cluster is restarted. 
A request to uninstall a library that is not + currently installed is ignored.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index b7c3235f8..5a55fd72b 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -940,11 +940,13 @@ func newUpdate() *cobra.Command { // TODO: array: clusters // TODO: map via StringToStringVar: configuration cmd.Flags().BoolVar(&updateReq.Continuous, "continuous", updateReq.Continuous, `Whether the pipeline is continuous or triggered.`) + // TODO: complex arg: deployment cmd.Flags().BoolVar(&updateReq.Development, "development", updateReq.Development, `Whether the pipeline is in Development mode.`) cmd.Flags().StringVar(&updateReq.Edition, "edition", updateReq.Edition, `Pipeline product edition.`) cmd.Flags().Int64Var(&updateReq.ExpectedLastModified, "expected-last-modified", updateReq.ExpectedLastModified, `If present, the last-modified time of the pipeline settings before the edit.`) // TODO: complex arg: filters cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Unique identifier for this pipeline.`) + // TODO: complex arg: ingestion_definition // TODO: array: libraries cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Friendly identifier for this pipeline.`) // TODO: array: notifications diff --git a/cmd/workspace/provider-exchanges/provider-exchanges.go b/cmd/workspace/provider-exchanges/provider-exchanges.go index fe1a9a3dc..c9f5818f5 100755 --- a/cmd/workspace/provider-exchanges/provider-exchanges.go +++ b/cmd/workspace/provider-exchanges/provider-exchanges.go @@ -508,28 +508,16 @@ func newListListingsForExchange() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No EXCHANGE_ID argument specified. Loading names for Provider Exchanges drop-down." - names, err := w.ProviderExchanges.ExchangeListingExchangeNameToExchangeIdMap(ctx, marketplace.ListExchangesForListingRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Provider Exchanges drop-down. Please manually specify required arguments. 
Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have ") - } listListingsForExchangeReq.ExchangeId = args[0] response := w.ProviderExchanges.ListListingsForExchange(ctx, listListingsForExchangeReq) diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 6706b99ea..dee341ab4 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -46,6 +46,7 @@ func New() *cobra.Command { cmd.AddCommand(newDelete()) cmd.AddCommand(newExportMetrics()) cmd.AddCommand(newGet()) + cmd.AddCommand(newGetOpenApi()) cmd.AddCommand(newGetPermissionLevels()) cmd.AddCommand(newGetPermissions()) cmd.AddCommand(newList()) @@ -379,6 +380,67 @@ func newGet() *cobra.Command { return cmd } +// start get-open-api command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOpenApiOverrides []func( + *cobra.Command, + *serving.GetOpenApiRequest, +) + +func newGetOpenApi() *cobra.Command { + cmd := &cobra.Command{} + + var getOpenApiReq serving.GetOpenApiRequest + + // TODO: short flags + + cmd.Use = "get-open-api NAME" + cmd.Short = `Get the schema for a serving endpoint.` + cmd.Long = `Get the schema for a serving endpoint. + + Get the query schema of the serving endpoint in OpenAPI format. The schema + contains information for the supported paths, input and output format and + datatypes. + + Arguments: + NAME: The name of the serving endpoint that the served model belongs to. This + field is required.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getOpenApiReq.Name = args[0] + + err = w.ServingEndpoints.GetOpenApi(ctx, getOpenApiReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOpenApiOverrides { + fn(cmd, &getOpenApiReq) + } + + return cmd +} + // start get-permission-levels command // Slice with functions to override default command behavior. 
diff --git a/go.mod b/go.mod index 6a991b0ec..7b2d31daa 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.38.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.39.0 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 8fe9109b5..5dc02d099 100644 --- a/go.sum +++ b/go.sum @@ -30,8 +30,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.38.0 h1:MQhOCWTkdKItG+n6ZwcXQv9FWBVXq9fax8VSZns2e+0= -github.com/databricks/databricks-sdk-go v0.38.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= +github.com/databricks/databricks-sdk-go v0.39.0 h1:nVnQYkk47SkEsRSXWkn6j7jBOxXgusjoo6xwbaHTGss= +github.com/databricks/databricks-sdk-go v0.39.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 153141d3eaab78c918a41ae950b3a4c2a24f109d Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Wed, 1 May 2024 10:22:35 +0200 Subject: [PATCH 172/286] Don't fail while parsing outdated terraform state (#1404) `terraform show -json` (`terraform.Show()`) fails if the state file contains resources with fields that no longer conform to the provider schemas. This can happen when you deploy a bundle with one version of the CLI, then update the CLI to a version that uses a different databricks terraform provider, and try to run `bundle run` or `bundle summary`. Those commands don't recreate local terraform state (only `terraform apply` or `plan` do) and terraform itself fails while parsing it. [Terraform docs](https://developer.hashicorp.com/terraform/language/state#format) point out that it's best to use `terraform show` after a successful `apply` or `plan`. Here we parse the state ourselves. The state file format is internal to terraform, but it's more stable than our resource schemas. We only parse a subset of fields from the state, and only update ID and ModifiedStatus of bundle resources in the `terraform.Load` mutator.
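For illustration, the sketch below is not the CLI's code; the type names and the state contents are made up. It shows the kind of partial parse this change performs: only resource type, name, mode, and each instance ID are read from the state file, and every other field is ignored.

```
package main

import (
	"encoding/json"
	"fmt"
)

// Only the subset of the Terraform state format that matters here:
// resource type, name, mode, and the ID of each instance.
type state struct {
	Version   int        `json:"version"`
	Resources []resource `json:"resources"`
}

type resource struct {
	Type      string     `json:"type"`
	Name      string     `json:"name"`
	Mode      string     `json:"mode"`
	Instances []instance `json:"instances"`
}

type instance struct {
	Attributes struct {
		ID string `json:"id"`
	} `json:"attributes"`
}

func main() {
	// Made-up terraform.tfstate contents; a real file carries many more
	// fields (serial, lineage, provider, full resource attributes, ...).
	raw := []byte(`{
		"version": 4,
		"resources": [
			{
				"mode": "managed",
				"type": "databricks_job",
				"name": "my_job",
				"instances": [{"attributes": {"id": "123"}}]
			}
		]
	}`)

	var s state
	if err := json.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	for _, r := range s.Resources {
		fmt.Println(r.Type, r.Name, r.Instances[0].Attributes.ID)
	}
}
```

Because `json.Unmarshal` silently drops unknown fields, new or changed provider attributes in the state cannot break this parse the way they break `terraform show -json`.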
--- bundle/deploy/check_running_resources.go | 92 ++---- bundle/deploy/check_running_resources_test.go | 64 +--- bundle/deploy/terraform/convert.go | 88 +++--- bundle/deploy/terraform/convert_test.go | 293 +++++++++++------- bundle/deploy/terraform/load.go | 16 +- bundle/deploy/terraform/util.go | 55 +++- bundle/deploy/terraform/util_test.go | 99 ++++++ bundle/phases/deploy.go | 1 + 8 files changed, 423 insertions(+), 285 deletions(-) diff --git a/bundle/deploy/check_running_resources.go b/bundle/deploy/check_running_resources.go index 7f7a9bcac..a2305cd75 100644 --- a/bundle/deploy/check_running_resources.go +++ b/bundle/deploy/check_running_resources.go @@ -6,12 +6,11 @@ import ( "strconv" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" - "github.com/hashicorp/terraform-exec/tfexec" - tfjson "github.com/hashicorp/terraform-json" "golang.org/x/sync/errgroup" ) @@ -35,27 +34,11 @@ func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) dia if !b.Config.Bundle.Deployment.FailOnActiveRuns { return nil } - - tf := b.Terraform - if tf == nil { - return diag.Errorf("terraform not initialized") - } - - err := tf.Init(ctx, tfexec.Upgrade(true)) - if err != nil { - return diag.Errorf("terraform init: %v", err) - } - - state, err := b.Terraform.Show(ctx) + w := b.WorkspaceClient() + err := checkAnyResourceRunning(ctx, w, &b.Config.Resources) if err != nil { return diag.FromErr(err) } - - err = checkAnyResourceRunning(ctx, b.WorkspaceClient(), state) - if err != nil { - return diag.Errorf("deployment aborted, err: %v", err) - } - return nil } @@ -63,54 +46,43 @@ func CheckRunningResource() *checkRunningResources { return &checkRunningResources{} } -func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, state *tfjson.State) error { - if state.Values == nil || state.Values.RootModule == nil { - return nil - } - +func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, resources *config.Resources) error { errs, errCtx := errgroup.WithContext(ctx) - for _, resource := range state.Values.RootModule.Resources { - // Limit to resources. 
- if resource.Mode != tfjson.ManagedResourceMode { + for _, job := range resources.Jobs { + id := job.ID + if id == "" { continue } + errs.Go(func() error { + isRunning, err := IsJobRunning(errCtx, w, id) + // If there's an error retrieving the job, we assume it's not running + if err != nil { + return err + } + if isRunning { + return &ErrResourceIsRunning{resourceType: "job", resourceId: id} + } + return nil + }) + } - value, ok := resource.AttributeValues["id"] - if !ok { + for _, pipeline := range resources.Pipelines { + id := pipeline.ID + if id == "" { continue } - id, ok := value.(string) - if !ok { - continue - } - - switch resource.Type { - case "databricks_job": - errs.Go(func() error { - isRunning, err := IsJobRunning(errCtx, w, id) - // If there's an error retrieving the job, we assume it's not running - if err != nil { - return err - } - if isRunning { - return &ErrResourceIsRunning{resourceType: "job", resourceId: id} - } + errs.Go(func() error { + isRunning, err := IsPipelineRunning(errCtx, w, id) + // If there's an error retrieving the pipeline, we assume it's not running + if err != nil { return nil - }) - case "databricks_pipeline": - errs.Go(func() error { - isRunning, err := IsPipelineRunning(errCtx, w, id) - // If there's an error retrieving the pipeline, we assume it's not running - if err != nil { - return nil - } - if isRunning { - return &ErrResourceIsRunning{resourceType: "pipeline", resourceId: id} - } - return nil - }) - } + } + if isRunning { + return &ErrResourceIsRunning{resourceType: "pipeline", resourceId: id} + } + return nil + }) } return errs.Wait() diff --git a/bundle/deploy/check_running_resources_test.go b/bundle/deploy/check_running_resources_test.go index 7dc1fb865..d61c80fc4 100644 --- a/bundle/deploy/check_running_resources_test.go +++ b/bundle/deploy/check_running_resources_test.go @@ -5,36 +5,26 @@ import ( "errors" "testing" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" - tfjson "github.com/hashicorp/terraform-json" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) func TestIsAnyResourceRunningWithEmptyState(t *testing.T) { mock := mocks.NewMockWorkspaceClient(t) - state := &tfjson.State{} - err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, state) + err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, &config.Resources{}) require.NoError(t, err) } func TestIsAnyResourceRunningWithJob(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) - state := &tfjson.State{ - Values: &tfjson.StateValues{ - RootModule: &tfjson.StateModule{ - Resources: []*tfjson.StateResource{ - { - Type: "databricks_job", - AttributeValues: map[string]interface{}{ - "id": "123", - }, - Mode: tfjson.ManagedResourceMode, - }, - }, - }, + resources := &config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": {ID: "123"}, }, } @@ -46,7 +36,7 @@ func TestIsAnyResourceRunningWithJob(t *testing.T) { {RunId: 1234}, }, nil).Once() - err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) require.ErrorContains(t, err, "job 123 is running") jobsApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ @@ -54,25 +44,15 @@ func TestIsAnyResourceRunningWithJob(t *testing.T) { 
ActiveOnly: true, }).Return([]jobs.BaseRun{}, nil).Once() - err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) require.NoError(t, err) } func TestIsAnyResourceRunningWithPipeline(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) - state := &tfjson.State{ - Values: &tfjson.StateValues{ - RootModule: &tfjson.StateModule{ - Resources: []*tfjson.StateResource{ - { - Type: "databricks_pipeline", - AttributeValues: map[string]interface{}{ - "id": "123", - }, - Mode: tfjson.ManagedResourceMode, - }, - }, - }, + resources := &config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "pipeline1": {ID: "123"}, }, } @@ -84,7 +64,7 @@ func TestIsAnyResourceRunningWithPipeline(t *testing.T) { State: pipelines.PipelineStateRunning, }, nil).Once() - err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) require.ErrorContains(t, err, "pipeline 123 is running") pipelineApi.EXPECT().Get(mock.Anything, pipelines.GetPipelineRequest{ @@ -93,25 +73,15 @@ func TestIsAnyResourceRunningWithPipeline(t *testing.T) { PipelineId: "123", State: pipelines.PipelineStateIdle, }, nil).Once() - err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) require.NoError(t, err) } func TestIsAnyResourceRunningWithAPIFailure(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) - state := &tfjson.State{ - Values: &tfjson.StateValues{ - RootModule: &tfjson.StateModule{ - Resources: []*tfjson.StateResource{ - { - Type: "databricks_pipeline", - AttributeValues: map[string]interface{}{ - "id": "123", - }, - Mode: tfjson.ManagedResourceMode, - }, - }, - }, + resources := &config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "pipeline1": {ID: "123"}, }, } @@ -120,6 +90,6 @@ func TestIsAnyResourceRunningWithAPIFailure(t *testing.T) { PipelineId: "123", }).Return(nil, errors.New("API failure")).Once() - err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, state) + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) require.NoError(t, err) } diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 0ae6751d0..d0b633582 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "reflect" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" @@ -19,15 +18,6 @@ func conv(from any, to any) { json.Unmarshal(buf, &to) } -func convRemoteToLocal(remote any, local any) resources.ModifiedStatus { - var modifiedStatus resources.ModifiedStatus - if reflect.ValueOf(local).Elem().IsNil() { - modifiedStatus = resources.ModifiedStatusDeleted - } - conv(remote, local) - return modifiedStatus -} - func convPermissions(acl []resources.Permission) *schema.ResourcePermissions { if len(acl) == 0 { return nil @@ -248,7 +238,7 @@ func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema tfroot.Provider = schema.NewProviders() // Convert each resource in the bundle to the equivalent Terraform representation. - resources, err := dyn.Get(root, "resources") + dynResources, err := dyn.Get(root, "resources") if err != nil { // If the resources key is missing, return an empty root. 
if dyn.IsNoSuchKeyError(err) { @@ -260,11 +250,20 @@ func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema tfroot.Resource = schema.NewResources() numResources := 0 - _, err = dyn.Walk(resources, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + _, err = dyn.Walk(dynResources, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { if len(p) < 2 { return v, nil } + // Skip resources that have been deleted locally. + modifiedStatus, err := dyn.Get(v, "modified_status") + if err == nil { + modifiedStatusStr, ok := modifiedStatus.AsString() + if ok && modifiedStatusStr == resources.ModifiedStatusDeleted { + return v, dyn.ErrSkip + } + } + typ := p[0].Key() key := p[1].Key() @@ -275,7 +274,7 @@ func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema } // Convert resource to Terraform representation. - err := c.Convert(ctx, key, v, tfroot.Resource) + err = c.Convert(ctx, key, v, tfroot.Resource) if err != nil { return dyn.InvalidValue, err } @@ -299,75 +298,72 @@ func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema return tfroot, nil } -func TerraformToBundle(state *tfjson.State, config *config.Root) error { - if state.Values != nil && state.Values.RootModule != nil { - for _, resource := range state.Values.RootModule.Resources { - // Limit to resources. - if resource.Mode != tfjson.ManagedResourceMode { - continue - } - +func TerraformToBundle(state *resourcesState, config *config.Root) error { + for _, resource := range state.Resources { + if resource.Mode != tfjson.ManagedResourceMode { + continue + } + for _, instance := range resource.Instances { switch resource.Type { case "databricks_job": - var tmp schema.ResourceJob - conv(resource.AttributeValues, &tmp) if config.Resources.Jobs == nil { config.Resources.Jobs = make(map[string]*resources.Job) } cur := config.Resources.Jobs[resource.Name] - // TODO: make sure we can unmarshall tf state properly and don't swallow errors - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.Job{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.Jobs[resource.Name] = cur case "databricks_pipeline": - var tmp schema.ResourcePipeline - conv(resource.AttributeValues, &tmp) if config.Resources.Pipelines == nil { config.Resources.Pipelines = make(map[string]*resources.Pipeline) } cur := config.Resources.Pipelines[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.Pipeline{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.Pipelines[resource.Name] = cur case "databricks_mlflow_model": - var tmp schema.ResourceMlflowModel - conv(resource.AttributeValues, &tmp) if config.Resources.Models == nil { config.Resources.Models = make(map[string]*resources.MlflowModel) } cur := config.Resources.Models[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.MlflowModel{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.Models[resource.Name] = cur case "databricks_mlflow_experiment": - var tmp schema.ResourceMlflowExperiment - conv(resource.AttributeValues, &tmp) if config.Resources.Experiments == nil { config.Resources.Experiments = make(map[string]*resources.MlflowExperiment) } cur := 
config.Resources.Experiments[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.MlflowExperiment{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.Experiments[resource.Name] = cur case "databricks_model_serving": - var tmp schema.ResourceModelServing - conv(resource.AttributeValues, &tmp) if config.Resources.ModelServingEndpoints == nil { config.Resources.ModelServingEndpoints = make(map[string]*resources.ModelServingEndpoint) } cur := config.Resources.ModelServingEndpoints[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.ModelServingEndpoint{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.ModelServingEndpoints[resource.Name] = cur case "databricks_registered_model": - var tmp schema.ResourceRegisteredModel - conv(resource.AttributeValues, &tmp) if config.Resources.RegisteredModels == nil { config.Resources.RegisteredModels = make(map[string]*resources.RegisteredModel) } cur := config.Resources.RegisteredModels[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.RegisteredModel{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.RegisteredModels[resource.Name] = cur case "databricks_permissions": case "databricks_grants": diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 986599a79..58523bb49 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -17,7 +17,6 @@ import ( "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/databricks/databricks-sdk-go/service/serving" - tfjson "github.com/hashicorp/terraform-json" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -548,50 +547,86 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { bundleToTerraformEquivalenceTest(t, &config) } +func TestBundleToTerraformDeletedResources(t *testing.T) { + var job1 = resources.Job{ + JobSettings: &jobs.JobSettings{}, + } + var job2 = resources.Job{ + ModifiedStatus: resources.ModifiedStatusDeleted, + JobSettings: &jobs.JobSettings{}, + } + var config = config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my_job1": &job1, + "my_job2": &job2, + }, + }, + } + + vin, err := convert.FromTyped(config, dyn.NilValue) + require.NoError(t, err) + out, err := BundleToTerraformWithDynValue(context.Background(), vin) + require.NoError(t, err) + + _, ok := out.Resource.Job["my_job1"] + assert.True(t, ok) + _, ok = out.Resource.Job["my_job2"] + assert.False(t, ok) +} + func TestTerraformToBundleEmptyLocalResources(t *testing.T) { var config = config.Root{ Resources: config.Resources{}, } - var tfState = tfjson.State{ - Values: &tfjson.StateValues{ - RootModule: &tfjson.StateModule{ - Resources: []*tfjson.StateResource{ - { - Type: "databricks_job", - Mode: "managed", - Name: "test_job", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_pipeline", - Mode: "managed", - Name: "test_pipeline", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_mlflow_model", - Mode: "managed", - Name: "test_mlflow_model", - 
AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_mlflow_experiment", - Mode: "managed", - Name: "test_mlflow_experiment", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_model_serving", - Mode: "managed", - Name: "test_model_serving", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_registered_model", - Mode: "managed", - Name: "test_registered_model", - AttributeValues: map[string]interface{}{"id": "1"}, - }, + var tfState = resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_job", + Mode: "managed", + Name: "test_job", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "test_pipeline", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_mlflow_model", + Mode: "managed", + Name: "test_mlflow_model", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_mlflow_experiment", + Mode: "managed", + Name: "test_mlflow_experiment", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_model_serving", + Mode: "managed", + Name: "test_model_serving", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_registered_model", + Mode: "managed", + Name: "test_registered_model", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, }, }, }, @@ -667,8 +702,8 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, } - var tfState = tfjson.State{ - Values: nil, + var tfState = resourcesState{ + Resources: nil, } err := TerraformToBundle(&tfState, &config) assert.NoError(t, err) @@ -771,82 +806,102 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, } - var tfState = tfjson.State{ - Values: &tfjson.StateValues{ - RootModule: &tfjson.StateModule{ - Resources: []*tfjson.StateResource{ - { - Type: "databricks_job", - Mode: "managed", - Name: "test_job", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_job", - Mode: "managed", - Name: "test_job_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_pipeline", - Mode: "managed", - Name: "test_pipeline", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_pipeline", - Mode: "managed", - Name: "test_pipeline_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_mlflow_model", - Mode: "managed", - Name: "test_mlflow_model", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_mlflow_model", - Mode: "managed", - Name: "test_mlflow_model_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_mlflow_experiment", - Mode: "managed", - Name: "test_mlflow_experiment", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_mlflow_experiment", - Mode: "managed", - Name: "test_mlflow_experiment_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_model_serving", - Mode: "managed", - Name: "test_model_serving", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_model_serving", - Mode: "managed", - Name: "test_model_serving_old", - 
AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_registered_model", - Mode: "managed", - Name: "test_registered_model", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_registered_model", - Mode: "managed", - Name: "test_registered_model_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, + var tfState = resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_job", + Mode: "managed", + Name: "test_job", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_job", + Mode: "managed", + Name: "test_job_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "test_pipeline", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "test_pipeline_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_mlflow_model", + Mode: "managed", + Name: "test_mlflow_model", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_mlflow_model", + Mode: "managed", + Name: "test_mlflow_model_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_mlflow_experiment", + Mode: "managed", + Name: "test_mlflow_experiment", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_mlflow_experiment", + Mode: "managed", + Name: "test_mlflow_experiment_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_model_serving", + Mode: "managed", + Name: "test_model_serving", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_model_serving", + Mode: "managed", + Name: "test_model_serving_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_registered_model", + Mode: "managed", + Name: "test_registered_model", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_registered_model", + Mode: "managed", + Name: "test_registered_model_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, }, }, }, diff --git a/bundle/deploy/terraform/load.go b/bundle/deploy/terraform/load.go index fa0cd5b4f..3fb76855e 100644 --- a/bundle/deploy/terraform/load.go +++ b/bundle/deploy/terraform/load.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" "github.com/hashicorp/terraform-exec/tfexec" - tfjson "github.com/hashicorp/terraform-json" ) type loadMode int @@ -34,7 +33,7 @@ func (l *load) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return diag.Errorf("terraform init: %v", err) } - state, err := b.Terraform.Show(ctx) + state, err := ParseResourcesState(ctx, b) if err != nil { return diag.FromErr(err) } @@ -53,16 +52,13 @@ func (l *load) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return nil } -func (l *load) validateState(state *tfjson.State) error { - if state.Values == nil { - if 
slices.Contains(l.modes, ErrorOnEmptyState) { - return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?") - } - return nil +func (l *load) validateState(state *resourcesState) error { + if state.Version != SupportedStateVersion { + return fmt.Errorf("unsupported deployment state version: %d. Try re-deploying the bundle", state.Version) } - if state.Values.RootModule == nil { - return fmt.Errorf("malformed terraform state: RootModule not set") + if len(state.Resources) == 0 && slices.Contains(l.modes, ErrorOnEmptyState) { + return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?") } return nil diff --git a/bundle/deploy/terraform/util.go b/bundle/deploy/terraform/util.go index a5978b397..1a8a83ac7 100644 --- a/bundle/deploy/terraform/util.go +++ b/bundle/deploy/terraform/util.go @@ -1,14 +1,46 @@ package terraform import ( + "context" "encoding/json" + "errors" "io" + "os" + "path/filepath" + + "github.com/databricks/cli/bundle" + tfjson "github.com/hashicorp/terraform-json" ) -type state struct { +// Partial representation of the Terraform state file format. +// We are only interested global version and serial numbers, +// plus resource types, names, modes, and ids. +type resourcesState struct { + Version int `json:"version"` + Resources []stateResource `json:"resources"` +} + +const SupportedStateVersion = 4 + +type serialState struct { Serial int `json:"serial"` } +type stateResource struct { + Type string `json:"type"` + Name string `json:"name"` + Mode tfjson.ResourceMode `json:"mode"` + Instances []stateResourceInstance `json:"instances"` +} + +type stateResourceInstance struct { + Attributes stateInstanceAttributes `json:"attributes"` +} + +type stateInstanceAttributes struct { + ID string `json:"id"` +} + func IsLocalStateStale(local io.Reader, remote io.Reader) bool { localState, err := loadState(local) if err != nil { @@ -23,12 +55,12 @@ func IsLocalStateStale(local io.Reader, remote io.Reader) bool { return localState.Serial < remoteState.Serial } -func loadState(input io.Reader) (*state, error) { +func loadState(input io.Reader) (*serialState, error) { content, err := io.ReadAll(input) if err != nil { return nil, err } - var s state + var s serialState err = json.Unmarshal(content, &s) if err != nil { return nil, err @@ -36,3 +68,20 @@ func loadState(input io.Reader) (*state, error) { return &s, nil } + +func ParseResourcesState(ctx context.Context, b *bundle.Bundle) (*resourcesState, error) { + cacheDir, err := Dir(ctx, b) + if err != nil { + return nil, err + } + rawState, err := os.ReadFile(filepath.Join(cacheDir, TerraformStateFileName)) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return &resourcesState{Version: SupportedStateVersion}, nil + } + return nil, err + } + var state resourcesState + err = json.Unmarshal(rawState, &state) + return &state, err +} diff --git a/bundle/deploy/terraform/util_test.go b/bundle/deploy/terraform/util_test.go index 4f2cf2918..8949ebca8 100644 --- a/bundle/deploy/terraform/util_test.go +++ b/bundle/deploy/terraform/util_test.go @@ -1,11 +1,16 @@ package terraform import ( + "context" "fmt" + "os" + "path/filepath" "strings" "testing" "testing/iotest" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/stretchr/testify/assert" ) @@ -38,3 +43,97 @@ func TestLocalStateMarkNonStaleWhenRemoteFailsToLoad(t *testing.T) { remote := iotest.ErrReader(fmt.Errorf("Random error")) assert.False(t, IsLocalStateStale(local, remote)) } + 
+func TestParseResourcesStateWithNoFile(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{ + ExecPath: "terraform", + }, + }, + }, + } + state, err := ParseResourcesState(context.Background(), b) + assert.NoError(t, err) + assert.Equal(t, &resourcesState{Version: SupportedStateVersion}, state) +} + +func TestParseResourcesStateWithExistingStateFile(t *testing.T) { + ctx := context.Background() + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{ + ExecPath: "terraform", + }, + }, + }, + } + cacheDir, err := Dir(ctx, b) + assert.NoError(t, err) + data := []byte(`{ + "version": 4, + "unknown_field": "hello", + "resources": [ + { + "mode": "managed", + "type": "databricks_pipeline", + "name": "test_pipeline", + "provider": "provider[\"registry.terraform.io/databricks/databricks\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "allow_duplicate_names": false, + "catalog": null, + "channel": "CURRENT", + "cluster": [], + "random_field": "random_value", + "configuration": { + "bundle.sourcePath": "/Workspace//Users/user/.bundle/test/dev/files/src" + }, + "continuous": false, + "development": true, + "edition": "ADVANCED", + "filters": [], + "id": "123", + "library": [], + "name": "test_pipeline", + "notification": [], + "photon": false, + "serverless": false, + "storage": "dbfs:/123456", + "target": "test_dev", + "timeouts": null, + "url": "https://test.com" + }, + "sensitive_attributes": [] + } + ] + } + ] + }`) + err = os.WriteFile(filepath.Join(cacheDir, TerraformStateFileName), data, os.ModePerm) + assert.NoError(t, err) + state, err := ParseResourcesState(ctx, b) + assert.NoError(t, err) + expected := &resourcesState{ + Version: 4, + Resources: []stateResource{ + { + Mode: "managed", + Type: "databricks_pipeline", + Name: "test_pipeline", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "123"}}, + }, + }, + }, + } + assert.Equal(t, expected, state) +} diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index fce98b038..4fc4f6300 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -36,6 +36,7 @@ func Deploy() bundle.Mutator { permissions.ApplyWorkspaceRootPermissions(), terraform.Interpolate(), terraform.Write(), + terraform.Load(), deploy.CheckRunningResource(), bundle.Defer( terraform.Apply(), From 507053ee50563bae098557246b4415b1b272d6ec Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 1 May 2024 14:07:03 +0530 Subject: [PATCH 173/286] Annotate DLT pipelines when deployed using DABs (#1410) ## Changes This PR annotates any pipelines that were deployed using DABs to have `deployment.kind` set to "BUNDLE", mirroring the annotation for Jobs (similar PR for jobs FYI: https://github.com/databricks/cli/pull/880). Breakglass UI is not yet available for pipelines, so this annotation will just be used for revenue attribution ATM. Note: The API field has been deployed in all regions including GovCloud. ## Tests Unit tests and manually. Manually verified that the kind and metadata_file_path are being set by DABs, and are returned by a GET API to a pipeline deployed using a DAB. 
Example: ``` "deployment": { "kind":"BUNDLE", "metadata_file_path":"/Users/shreyas.goenka@databricks.com/.bundle/bundle-playground/default/state/metadata.json" }, ``` --- bundle/deploy/metadata/annotate_jobs.go | 3 +- bundle/deploy/metadata/annotate_pipelines.go | 34 +++++++++ .../metadata/annotate_pipelines_test.go | 72 +++++++++++++++++++ bundle/deploy/metadata/upload.go | 9 ++- bundle/phases/initialize.go | 1 + 5 files changed, 115 insertions(+), 4 deletions(-) create mode 100644 bundle/deploy/metadata/annotate_pipelines.go create mode 100644 bundle/deploy/metadata/annotate_pipelines_test.go diff --git a/bundle/deploy/metadata/annotate_jobs.go b/bundle/deploy/metadata/annotate_jobs.go index 2b03a59b7..f42d46931 100644 --- a/bundle/deploy/metadata/annotate_jobs.go +++ b/bundle/deploy/metadata/annotate_jobs.go @@ -2,7 +2,6 @@ package metadata import ( "context" - "path" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" @@ -27,7 +26,7 @@ func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnosti job.JobSettings.Deployment = &jobs.JobDeployment{ Kind: jobs.JobDeploymentKindBundle, - MetadataFilePath: path.Join(b.Config.Workspace.StatePath, MetadataFileName), + MetadataFilePath: metadataFilePath(b), } job.JobSettings.EditMode = jobs.JobEditModeUiLocked job.JobSettings.Format = jobs.FormatMultiTask diff --git a/bundle/deploy/metadata/annotate_pipelines.go b/bundle/deploy/metadata/annotate_pipelines.go new file mode 100644 index 000000000..990f48907 --- /dev/null +++ b/bundle/deploy/metadata/annotate_pipelines.go @@ -0,0 +1,34 @@ +package metadata + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/pipelines" +) + +type annotatePipelines struct{} + +func AnnotatePipelines() bundle.Mutator { + return &annotatePipelines{} +} + +func (m *annotatePipelines) Name() string { + return "metadata.AnnotatePipelines" +} + +func (m *annotatePipelines) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + for _, pipeline := range b.Config.Resources.Pipelines { + if pipeline.PipelineSpec == nil { + continue + } + + pipeline.PipelineSpec.Deployment = &pipelines.PipelineDeployment{ + Kind: pipelines.DeploymentKindBundle, + MetadataFilePath: metadataFilePath(b), + } + } + + return nil +} diff --git a/bundle/deploy/metadata/annotate_pipelines_test.go b/bundle/deploy/metadata/annotate_pipelines_test.go new file mode 100644 index 000000000..448a022d0 --- /dev/null +++ b/bundle/deploy/metadata/annotate_pipelines_test.go @@ -0,0 +1,72 @@ +package metadata + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAnnotatePipelinesMutator(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + StatePath: "/a/b/c", + }, + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "my-pipeline-1": { + PipelineSpec: &pipelines.PipelineSpec{ + Name: "My Pipeline One", + }, + }, + "my-pipeline-2": { + PipelineSpec: &pipelines.PipelineSpec{ + Name: "My Pipeline Two", + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, AnnotatePipelines()) + require.NoError(t, diags.Error()) + + assert.Equal(t, + 
&pipelines.PipelineDeployment{ + Kind: pipelines.DeploymentKindBundle, + MetadataFilePath: "/a/b/c/metadata.json", + }, + b.Config.Resources.Pipelines["my-pipeline-1"].PipelineSpec.Deployment) + + assert.Equal(t, + &pipelines.PipelineDeployment{ + Kind: pipelines.DeploymentKindBundle, + MetadataFilePath: "/a/b/c/metadata.json", + }, + b.Config.Resources.Pipelines["my-pipeline-2"].PipelineSpec.Deployment) +} + +func TestAnnotatePipelinesMutatorPipelineWithoutASpec(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + StatePath: "/a/b/c", + }, + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "my-pipeline-1": {}, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, AnnotatePipelines()) + require.NoError(t, diags.Error()) +} diff --git a/bundle/deploy/metadata/upload.go b/bundle/deploy/metadata/upload.go index a040a0ae8..ee87816de 100644 --- a/bundle/deploy/metadata/upload.go +++ b/bundle/deploy/metadata/upload.go @@ -4,13 +4,18 @@ import ( "bytes" "context" "encoding/json" + "path" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" ) -const MetadataFileName = "metadata.json" +const metadataFileName = "metadata.json" + +func metadataFilePath(b *bundle.Bundle) string { + return path.Join(b.Config.Workspace.StatePath, metadataFileName) +} type upload struct{} @@ -33,5 +38,5 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return diag.FromErr(err) } - return diag.FromErr(f.Write(ctx, MetadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists)) + return diag.FromErr(f.Write(ctx, metadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists)) } diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 2f5eab302..ded2e1980 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -45,6 +45,7 @@ func Initialize() bundle.Mutator { permissions.ApplyBundlePermissions(), permissions.FilterCurrentUser(), metadata.AnnotateJobs(), + metadata.AnnotatePipelines(), terraform.Initialize(), scripts.Execute(config.ScriptPostInit), }, From 30215860e797c5c154862bff95ed2092a7e92d19 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 1 May 2024 16:34:37 +0530 Subject: [PATCH 174/286] Fix description memoization in bundle schema (#1409) ## Changes Fixes https://github.com/databricks/cli/issues/559 The CLI generation is now stable and does not produce a diff for the `bundle_descriptions.json` file. Before, a pointer to the schema was stored in the memo, which would be mutated later to include the description. This led to duplicate documentation for schema components that were used in multiple places. This PR fixes this issue. E.g. before this fix, all references to `pause_status` would have the same description. ## Tests Added regression test.
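To make the pointer-versus-value distinction concrete, here is a minimal sketch; the `Schema` type is a simplified stand-in for the real `jsonschema.Schema`, and the descriptions are only examples. Storing pointers in the memo means every reference shares one mutable schema, while storing values hands out independent copies.

```
package main

import "fmt"

// Simplified stand-in for the real jsonschema.Schema type.
type Schema struct {
	Description string
}

func main() {
	// Buggy pattern: the memo holds pointers, so every place that later sets
	// a description mutates the one shared instance.
	memoPtr := map[string]*Schema{"pause_status": {}}
	jobSchedule := memoPtr["pause_status"]
	trigger := memoPtr["pause_status"]
	jobSchedule.Description = "Indicate whether this schedule is paused or not."
	trigger.Description = "Whether this trigger is paused or not."
	// Both now show the trigger description: the earlier write was clobbered.
	fmt.Println(jobSchedule.Description)

	// Fixed pattern: the memo holds values, so each lookup yields an
	// independent copy that can carry its own description.
	memoVal := map[string]Schema{"pause_status": {}}
	a := memoVal["pause_status"]
	b := memoVal["pause_status"]
	a.Description = "Indicate whether this schedule is paused or not."
	b.Description = "Whether this trigger is paused or not."
	fmt.Println(a.Description) // unchanged by the write to b
}
```

Storing values is marginally more expensive to copy, but it keeps memoized schemas immutable from the caller's point of view, which is what makes the generated descriptions stable across runs.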
--- bundle/schema/docs.go | 2 +- bundle/schema/docs/bundle_descriptions.json | 28 ++++---- bundle/schema/openapi.go | 43 +++++++----- bundle/schema/openapi_test.go | 74 ++++++++++++++++++--- 4 files changed, 108 insertions(+), 39 deletions(-) diff --git a/bundle/schema/docs.go b/bundle/schema/docs.go index fe63e4328..5b960ea55 100644 --- a/bundle/schema/docs.go +++ b/bundle/schema/docs.go @@ -70,7 +70,7 @@ func UpdateBundleDescriptions(openapiSpecPath string) (*Docs, error) { } openapiReader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } // Generate descriptions for the "resources" field diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 75499507d..01d37dd71 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -756,7 +756,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Indicate whether this schedule is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." @@ -813,7 +813,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace.\n* `GIT`: Project is located in cloud Git provider." }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -972,7 +972,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." 
}, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { "description": "" @@ -1474,7 +1474,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, @@ -1552,7 +1552,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider." } } }, @@ -1654,10 +1654,10 @@ } }, "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." }, "table": { - "description": "", + "description": "Old table trigger settings name. Deprecated in favor of `table_update`.", "properties": { "condition": { "description": "The table(s) condition based on which to trigger a job run." @@ -3479,7 +3479,7 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." 
+ "description": "Indicate whether this schedule is paused or not." }, "quartz_cron_expression": { "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." @@ -3536,7 +3536,7 @@ "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace.\n* `GIT`: Project is located in cloud Git provider." }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." @@ -3695,7 +3695,7 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for each task.", + "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { "description": "" @@ -4197,7 +4197,7 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). 
When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, @@ -4275,7 +4275,7 @@ "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." }, "source": { - "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider." } } }, @@ -4377,10 +4377,10 @@ } }, "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." }, "table": { - "description": "", + "description": "Old table trigger settings name. Deprecated in favor of `table_update`.", "properties": { "condition": { "description": "The table(s) condition based on which to trigger a job run." diff --git a/bundle/schema/openapi.go b/bundle/schema/openapi.go index fe329e7ac..1756d5165 100644 --- a/bundle/schema/openapi.go +++ b/bundle/schema/openapi.go @@ -10,17 +10,21 @@ import ( ) type OpenapiReader struct { + // OpenAPI spec to read schemas from. OpenapiSpec *openapi.Specification - Memo map[string]*jsonschema.Schema + + // In-memory cache of schemas read from the OpenAPI spec. + memo map[string]jsonschema.Schema } const SchemaPathPrefix = "#/components/schemas/" -func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema, error) { +// Read a schema directly from the OpenAPI spec. 
+func (reader *OpenapiReader) readOpenapiSchema(path string) (jsonschema.Schema, error) { schemaKey := strings.TrimPrefix(path, SchemaPathPrefix) // return early if we already have a computed schema - memoSchema, ok := reader.Memo[schemaKey] + memoSchema, ok := reader.memo[schemaKey] if ok { return memoSchema, nil } @@ -28,18 +32,18 @@ func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema, // check path is present in openapi spec openapiSchema, ok := reader.OpenapiSpec.Components.Schemas[schemaKey] if !ok { - return nil, fmt.Errorf("schema with path %s not found in openapi spec", path) + return jsonschema.Schema{}, fmt.Errorf("schema with path %s not found in openapi spec", path) } // convert openapi schema to the native schema struct bytes, err := json.Marshal(*openapiSchema) if err != nil { - return nil, err + return jsonschema.Schema{}, err } - jsonSchema := &jsonschema.Schema{} - err = json.Unmarshal(bytes, jsonSchema) + jsonSchema := jsonschema.Schema{} + err = json.Unmarshal(bytes, &jsonSchema) if err != nil { - return nil, err + return jsonschema.Schema{}, err } // A hack to convert a map[string]interface{} to *Schema @@ -49,23 +53,28 @@ func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema, if ok { b, err := json.Marshal(jsonSchema.AdditionalProperties) if err != nil { - return nil, err + return jsonschema.Schema{}, err } additionalProperties := &jsonschema.Schema{} err = json.Unmarshal(b, additionalProperties) if err != nil { - return nil, err + return jsonschema.Schema{}, err } jsonSchema.AdditionalProperties = additionalProperties } // store read schema into memo - reader.Memo[schemaKey] = jsonSchema + reader.memo[schemaKey] = jsonSchema return jsonSchema, nil } -// safe againt loops in refs +// Resolve all nested "$ref" references in the schema. This function unrolls a single +// level of "$ref" in the schema and calls into traverseSchema to resolve nested references. +// Thus this function and traverseSchema are mutually recursive. +// +// This function is safe against reference loops. If a reference loop is detected, an error +// is returned. func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) { if root.Reference == nil { return reader.traverseSchema(root, tracker) @@ -91,12 +100,12 @@ func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *t // in the memo root.Reference = nil - // unroll one level of reference + // unroll one level of reference. selfRef, err := reader.readOpenapiSchema(ref) if err != nil { return nil, err } - root = selfRef + root = &selfRef root.Description = description // traverse again to find new references @@ -108,6 +117,8 @@ func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *t return root, err } +// Traverse the nested properties of the schema to resolve "$ref" references. This function +// and safeResolveRefs are mutually recursive. 
func (reader *OpenapiReader) traverseSchema(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) { // case primitive (or invalid) if root.Type != jsonschema.ObjectType && root.Type != jsonschema.ArrayType { @@ -154,11 +165,11 @@ func (reader *OpenapiReader) readResolvedSchema(path string) (*jsonschema.Schema } tracker := newTracker() tracker.push(path, path) - root, err = reader.safeResolveRefs(root, tracker) + resolvedRoot, err := reader.safeResolveRefs(&root, tracker) if err != nil { return nil, tracker.errWithTrace(err.Error(), "") } - return root, nil + return resolvedRoot, nil } func (reader *OpenapiReader) jobsDocs() (*Docs, error) { diff --git a/bundle/schema/openapi_test.go b/bundle/schema/openapi_test.go index 0d71fa440..359b1e58a 100644 --- a/bundle/schema/openapi_test.go +++ b/bundle/schema/openapi_test.go @@ -48,7 +48,7 @@ func TestReadSchemaForObject(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -106,7 +106,7 @@ func TestReadSchemaForArray(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -152,7 +152,7 @@ func TestReadSchemaForMap(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -201,7 +201,7 @@ func TestRootReferenceIsResolved(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -251,7 +251,7 @@ func TestSelfReferenceLoopErrors(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -285,7 +285,7 @@ func TestCrossReferenceLoopErrors(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -330,7 +330,7 @@ func TestReferenceResolutionForMapInObject(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -400,7 +400,7 @@ func TestReferenceResolutionForArrayInObject(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -434,3 +434,61 @@ func TestReferenceResolutionForArrayInObject(t *testing.T) { t.Log("[DEBUG] expected: ", expected) assert.Equal(t, expected, string(fruitsSchemaJson)) } + +func 
TestReferenceResolutionDoesNotOverwriteDescriptions(t *testing.T) { + specString := `{ + "components": { + "schemas": { + "foo": { + "type": "number" + }, + "fruits": { + "type": "object", + "properties": { + "guava": { + "type": "object", + "description": "Guava is a fruit", + "$ref": "#/components/schemas/foo" + }, + "mango": { + "type": "object", + "description": "What is a mango?", + "$ref": "#/components/schemas/foo" + } + } + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + memo: make(map[string]jsonschema.Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits") + require.NoError(t, err) + + fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ") + require.NoError(t, err) + + expected := `{ + "type": "object", + "properties": { + "guava": { + "type": "number", + "description": "Guava is a fruit" + }, + "mango": { + "type": "number", + "description": "What is a mango?" + } + } + }` + + t.Log("[DEBUG] actual: ", string(fruitsSchemaJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(fruitsSchemaJson)) +} From 4724ecb324cf0286f6fd756f74e26689b516d924 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 1 May 2024 14:09:06 +0200 Subject: [PATCH 175/286] Release v0.219.0 (#1412) Bundles: * Don't fail while parsing outdated terraform state ([#1404](https://github.com/databricks/cli/pull/1404)). * Annotate DLT pipelines when deployed using DABs ([#1410](https://github.com/databricks/cli/pull/1410)). API Changes: * Changed `databricks libraries cluster-status` command. New request type is compute.ClusterStatus. * Changed `databricks libraries cluster-status` command to return . * Added `databricks serving-endpoints get-open-api` command. OpenAPI commit 21f9f1482f9d0d15228da59f2cd9f0863d2a6d55 (2024-04-23) Dependency updates: * Bump github.com/databricks/databricks-sdk-go from 0.38.0 to 0.39.0 ([#1405](https://github.com/databricks/cli/pull/1405)). --- CHANGELOG.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 898f0df9d..1bd824daf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Version changelog +## 0.219.0 + +Bundles: + * Don't fail while parsing outdated terraform state ([#1404](https://github.com/databricks/cli/pull/1404)). + * Annotate DLT pipelines when deployed using DABs ([#1410](https://github.com/databricks/cli/pull/1410)). + + +API Changes: + * Changed `databricks libraries cluster-status` command. New request type is compute.ClusterStatus. + * Changed `databricks libraries cluster-status` command to return . + * Added `databricks serving-endpoints get-open-api` command. + +OpenAPI commit 21f9f1482f9d0d15228da59f2cd9f0863d2a6d55 (2024-04-23) +Dependency updates: + * Bump github.com/databricks/databricks-sdk-go from 0.38.0 to 0.39.0 ([#1405](https://github.com/databricks/cli/pull/1405)). + ## 0.218.1 This is a bugfix release. 
From a393c87ed931f449b4a1b0d86024399ea1febfb9 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 6 May 2024 13:41:37 +0200 Subject: [PATCH 176/286] Upgrade TF provider to 1.42.0 (#1418) ## Changes Upgrade TF provider to 1.42.0 Also fixes #1258 --- bundle/internal/tf/codegen/schema/version.go | 2 +- bundle/internal/tf/schema/data_source_job.go | 39 +++++++------- bundle/internal/tf/schema/resource_cluster.go | 13 ++--- .../tf/schema/resource_cluster_policy.go | 13 ++--- bundle/internal/tf/schema/resource_job.go | 39 +++++++------- bundle/internal/tf/schema/resource_library.go | 17 ++++--- .../tf/schema/resource_mws_ncc_binding.go | 9 ++++ .../resource_mws_ncc_private_endpoint_rule.go | 17 +++++++ ...esource_mws_network_connectivity_config.go | 51 +++++++++++++++++++ bundle/internal/tf/schema/resources.go | 6 +++ bundle/internal/tf/schema/root.go | 2 +- 11 files changed, 150 insertions(+), 58 deletions(-) create mode 100644 bundle/internal/tf/schema/resource_mws_ncc_binding.go create mode 100644 bundle/internal/tf/schema/resource_mws_ncc_private_endpoint_rule.go create mode 100644 bundle/internal/tf/schema/resource_mws_network_connectivity_config.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 4fb4bf2c5..30885d961 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.40.0" +const ProviderVersion = "1.42.0" diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index dbd29f4ba..e5ec5afb7 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -243,12 +243,13 @@ type DataSourceJobJobSettingsSettingsLibraryPypi struct { } type DataSourceJobJobSettingsSettingsLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *DataSourceJobJobSettingsSettingsLibraryCran `json:"cran,omitempty"` - Maven *DataSourceJobJobSettingsSettingsLibraryMaven `json:"maven,omitempty"` - Pypi *DataSourceJobJobSettingsSettingsLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *DataSourceJobJobSettingsSettingsLibraryCran `json:"cran,omitempty"` + Maven *DataSourceJobJobSettingsSettingsLibraryMaven `json:"maven,omitempty"` + Pypi *DataSourceJobJobSettingsSettingsLibraryPypi `json:"pypi,omitempty"` } type DataSourceJobJobSettingsSettingsNewClusterAutoscale struct { @@ -558,12 +559,13 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryPypi struct { } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` - Maven *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` - Pypi *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` + Maven 
*DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` + Pypi *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryPypi `json:"pypi,omitempty"` } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAutoscale struct { @@ -896,12 +898,13 @@ type DataSourceJobJobSettingsSettingsTaskLibraryPypi struct { } type DataSourceJobJobSettingsSettingsTaskLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *DataSourceJobJobSettingsSettingsTaskLibraryCran `json:"cran,omitempty"` - Maven *DataSourceJobJobSettingsSettingsTaskLibraryMaven `json:"maven,omitempty"` - Pypi *DataSourceJobJobSettingsSettingsTaskLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *DataSourceJobJobSettingsSettingsTaskLibraryCran `json:"cran,omitempty"` + Maven *DataSourceJobJobSettingsSettingsTaskLibraryMaven `json:"maven,omitempty"` + Pypi *DataSourceJobJobSettingsSettingsTaskLibraryPypi `json:"pypi,omitempty"` } type DataSourceJobJobSettingsSettingsTaskNewClusterAutoscale struct { diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 6f866ba87..046e0bb43 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -146,12 +146,13 @@ type ResourceClusterLibraryPypi struct { } type ResourceClusterLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceClusterLibraryCran `json:"cran,omitempty"` - Maven *ResourceClusterLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceClusterLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceClusterLibraryPypi `json:"pypi,omitempty"` } type ResourceClusterWorkloadTypeClients struct { diff --git a/bundle/internal/tf/schema/resource_cluster_policy.go b/bundle/internal/tf/schema/resource_cluster_policy.go index 637fe6455..d8111fef2 100644 --- a/bundle/internal/tf/schema/resource_cluster_policy.go +++ b/bundle/internal/tf/schema/resource_cluster_policy.go @@ -19,12 +19,13 @@ type ResourceClusterPolicyLibrariesPypi struct { } type ResourceClusterPolicyLibraries struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceClusterPolicyLibrariesCran `json:"cran,omitempty"` - Maven *ResourceClusterPolicyLibrariesMaven `json:"maven,omitempty"` - Pypi *ResourceClusterPolicyLibrariesPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceClusterPolicyLibrariesCran `json:"cran,omitempty"` + Maven *ResourceClusterPolicyLibrariesMaven `json:"maven,omitempty"` + Pypi *ResourceClusterPolicyLibrariesPypi `json:"pypi,omitempty"` } type ResourceClusterPolicy struct { diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 2431262c1..6958face8 100644 --- 
a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -243,12 +243,13 @@ type ResourceJobLibraryPypi struct { } type ResourceJobLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceJobLibraryCran `json:"cran,omitempty"` - Maven *ResourceJobLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceJobLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobLibraryPypi `json:"pypi,omitempty"` } type ResourceJobNewClusterAutoscale struct { @@ -558,12 +559,13 @@ type ResourceJobTaskForEachTaskTaskLibraryPypi struct { } type ResourceJobTaskForEachTaskTaskLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceJobTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` - Maven *ResourceJobTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceJobTaskForEachTaskTaskLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskForEachTaskTaskLibraryPypi `json:"pypi,omitempty"` } type ResourceJobTaskForEachTaskTaskNewClusterAutoscale struct { @@ -896,12 +898,13 @@ type ResourceJobTaskLibraryPypi struct { } type ResourceJobTaskLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceJobTaskLibraryCran `json:"cran,omitempty"` - Maven *ResourceJobTaskLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceJobTaskLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskLibraryPypi `json:"pypi,omitempty"` } type ResourceJobTaskNewClusterAutoscale struct { diff --git a/bundle/internal/tf/schema/resource_library.go b/bundle/internal/tf/schema/resource_library.go index e2e83fb4f..385d992df 100644 --- a/bundle/internal/tf/schema/resource_library.go +++ b/bundle/internal/tf/schema/resource_library.go @@ -19,12 +19,13 @@ type ResourceLibraryPypi struct { } type ResourceLibrary struct { - ClusterId string `json:"cluster_id"` - Egg string `json:"egg,omitempty"` - Id string `json:"id,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceLibraryCran `json:"cran,omitempty"` - Maven *ResourceLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceLibraryPypi `json:"pypi,omitempty"` + ClusterId string `json:"cluster_id"` + Egg string `json:"egg,omitempty"` + Id string `json:"id,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceLibraryCran `json:"cran,omitempty"` + Maven *ResourceLibraryMaven `json:"maven,omitempty"` + 
Pypi *ResourceLibraryPypi `json:"pypi,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_mws_ncc_binding.go b/bundle/internal/tf/schema/resource_mws_ncc_binding.go new file mode 100644 index 000000000..8beafb6f5 --- /dev/null +++ b/bundle/internal/tf/schema/resource_mws_ncc_binding.go @@ -0,0 +1,9 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceMwsNccBinding struct { + Id string `json:"id,omitempty"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id"` + WorkspaceId int `json:"workspace_id"` +} diff --git a/bundle/internal/tf/schema/resource_mws_ncc_private_endpoint_rule.go b/bundle/internal/tf/schema/resource_mws_ncc_private_endpoint_rule.go new file mode 100644 index 000000000..2acb374bc --- /dev/null +++ b/bundle/internal/tf/schema/resource_mws_ncc_private_endpoint_rule.go @@ -0,0 +1,17 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceMwsNccPrivateEndpointRule struct { + ConnectionState string `json:"connection_state,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + Deactivated bool `json:"deactivated,omitempty"` + DeactivatedAt int `json:"deactivated_at,omitempty"` + EndpointName string `json:"endpoint_name,omitempty"` + GroupId string `json:"group_id"` + Id string `json:"id,omitempty"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id"` + ResourceId string `json:"resource_id"` + RuleId string `json:"rule_id,omitempty"` + UpdatedTime int `json:"updated_time,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_mws_network_connectivity_config.go b/bundle/internal/tf/schema/resource_mws_network_connectivity_config.go new file mode 100644 index 000000000..64ebab224 --- /dev/null +++ b/bundle/internal/tf/schema/resource_mws_network_connectivity_config.go @@ -0,0 +1,51 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule struct { + CidrBlocks []string `json:"cidr_blocks,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule struct { + Subnets []string `json:"subnets,omitempty"` + TargetRegion string `json:"target_region,omitempty"` + TargetServices []string `json:"target_services,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRules struct { + AwsStableIpRule *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule `json:"aws_stable_ip_rule,omitempty"` + AzureServiceEndpointRule *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule `json:"azure_service_endpoint_rule,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRules struct { + ConnectionState string `json:"connection_state,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + Deactivated bool `json:"deactivated,omitempty"` + DeactivatedAt int `json:"deactivated_at,omitempty"` + EndpointName string `json:"endpoint_name,omitempty"` + GroupId string `json:"group_id,omitempty"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` + ResourceId string `json:"resource_id,omitempty"` + RuleId string `json:"rule_id,omitempty"` + UpdatedTime int `json:"updated_time,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfigTargetRules struct { + AzurePrivateEndpointRules []ResourceMwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRules `json:"azure_private_endpoint_rules,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfig struct { + DefaultRules *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRules `json:"default_rules,omitempty"` + TargetRules *ResourceMwsNetworkConnectivityConfigEgressConfigTargetRules `json:"target_rules,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfig struct { + AccountId string `json:"account_id,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` + Region string `json:"region"` + UpdatedTime int `json:"updated_time,omitempty"` + EgressConfig *ResourceMwsNetworkConnectivityConfigEgressConfig `json:"egress_config,omitempty"` +} diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index b1b1841d6..e5eacc867 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -45,6 +45,9 @@ type Resources struct { MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"` MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"` + MwsNccBinding map[string]any `json:"databricks_mws_ncc_binding,omitempty"` + MwsNccPrivateEndpointRule map[string]any `json:"databricks_mws_ncc_private_endpoint_rule,omitempty"` + MwsNetworkConnectivityConfig map[string]any `json:"databricks_mws_network_connectivity_config,omitempty"` MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"` MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"` MwsPrivateAccessSettings map[string]any `json:"databricks_mws_private_access_settings,omitempty"` @@ -137,6 +140,9 @@ 
func NewResources() *Resources { MwsCredentials: make(map[string]any), MwsCustomerManagedKeys: make(map[string]any), MwsLogDelivery: make(map[string]any), + MwsNccBinding: make(map[string]any), + MwsNccPrivateEndpointRule: make(map[string]any), + MwsNetworkConnectivityConfig: make(map[string]any), MwsNetworks: make(map[string]any), MwsPermissionAssignment: make(map[string]any), MwsPrivateAccessSettings: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index be6852bc0..50d05daab 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.40.0" +const ProviderVersion = "1.42.0" func NewRoot() *Root { return &Root{ From 648309d939be8eee360b9d337cc817ab4bf733a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 13:32:35 +0200 Subject: [PATCH 177/286] Bump golang.org/x/text from 0.14.0 to 0.15.0 (#1419) Bumps [golang.org/x/text](https://github.com/golang/text) from 0.14.0 to 0.15.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/text&package-manager=go_modules&previous-version=0.14.0&new-version=0.15.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7b2d31daa..5ba534106 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/oauth2 v0.19.0 golang.org/x/sync v0.7.0 golang.org/x/term v0.19.0 - golang.org/x/text v0.14.0 + golang.org/x/text v0.15.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index 5dc02d099..226969a87 100644 --- a/go.sum +++ b/go.sum @@ -214,8 +214,8 @@ golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 649016d50ddabd7b7210325057f0509660c102b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 13:32:55 +0200 Subject: [PATCH 178/286] Bump golang.org/x/oauth2 from 0.19.0 to 0.20.0 (#1421) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.19.0 to 0.20.0.
Commits
  • 84cb9f7 oauth2: fix typo in comment
  • 4b7f0bd go.mod: update cloud.google.com/go/compute/metadata dependency
  • e11eea8 microsoft: added DeviceAuthURL to AzureADEndpoint
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/oauth2&package-manager=go_modules&previous-version=0.19.0&new-version=0.20.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 5 ++--- go.sum | 10 ++++------ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 5ba534106..475f66f38 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.17.0 - golang.org/x/oauth2 v0.19.0 + golang.org/x/oauth2 v0.20.0 golang.org/x/sync v0.7.0 golang.org/x/term v0.19.0 golang.org/x/text v0.15.0 @@ -32,8 +32,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.23.4 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect diff --git a/go.sum b/go.sum index 226969a87..78f7bbd91 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw= -cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -191,8 +189,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 95bbe2ece191c0446a881b6637a682c4c3f5034e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 13 May 2024 17:46:43 +0530 Subject: [PATCH 179/286] Fix flaky tests for the parallel mutator (#1426) ## Changes Around 0.5% to 1% of the time, the tests would fail due to concurrent access to the underlying slice in the mutator. This PR makes the test thread safe preventing race conditions. 
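For illustration only (this is a generic sketch, not the test code itself): unsynchronized appends from multiple goroutines to a shared slice are a data race that `go test -race` reports only intermittently, and guarding the append with a mutex removes it.

```go
package main

import "sync"

// racy appends to a shared slice from several goroutines without
// synchronization; the race detector flags this only some of the time.
func racy() []int {
	var out []int
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(v int) {
			defer wg.Done()
			out = append(out, v)
		}(i)
	}
	wg.Wait()
	return out
}

// safe guards every append with a mutex, mirroring the shape of the fix.
func safe() []int {
	var out []int
	var mu sync.Mutex
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(v int) {
			defer wg.Done()
			mu.Lock()
			out = append(out, v)
			mu.Unlock()
		}(i)
	}
	wg.Wait()
	return out
}

func main() {
	_ = racy()
	_ = safe()
}
```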
Example of failed run: https://github.com/databricks/cli/actions/runs/9004657555/job/24738145829 --- bundle/parallel_test.go | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/bundle/parallel_test.go b/bundle/parallel_test.go index be1e33637..dfc7ddac9 100644 --- a/bundle/parallel_test.go +++ b/bundle/parallel_test.go @@ -2,6 +2,7 @@ package bundle import ( "context" + "sync" "testing" "github.com/databricks/cli/bundle/config" @@ -10,9 +11,14 @@ import ( ) type addToContainer struct { + t *testing.T container *[]int value int err bool + + // mu is a mutex that protects container. It is used to ensure that the + // container slice is only modified by one goroutine at a time. + mu *sync.Mutex } func (m *addToContainer) Apply(ctx context.Context, b ReadOnlyBundle) diag.Diagnostics { @@ -20,9 +26,10 @@ func (m *addToContainer) Apply(ctx context.Context, b ReadOnlyBundle) diag.Diagn return diag.Errorf("error") } - c := *m.container - c = append(c, m.value) - *m.container = c + m.mu.Lock() + *m.container = append(*m.container, m.value) + m.mu.Unlock() + return nil } @@ -36,9 +43,10 @@ func TestParallelMutatorWork(t *testing.T) { } container := []int{} - m1 := &addToContainer{container: &container, value: 1} - m2 := &addToContainer{container: &container, value: 2} - m3 := &addToContainer{container: &container, value: 3} + var mu sync.Mutex + m1 := &addToContainer{t: t, container: &container, value: 1, mu: &mu} + m2 := &addToContainer{t: t, container: &container, value: 2, mu: &mu} + m3 := &addToContainer{t: t, container: &container, value: 3, mu: &mu} m := Parallel(m1, m2, m3) @@ -57,9 +65,10 @@ func TestParallelMutatorWorkWithErrors(t *testing.T) { } container := []int{} - m1 := &addToContainer{container: &container, value: 1} - m2 := &addToContainer{container: &container, err: true, value: 2} - m3 := &addToContainer{container: &container, value: 3} + var mu sync.Mutex + m1 := &addToContainer{container: &container, value: 1, mu: &mu} + m2 := &addToContainer{container: &container, err: true, value: 2, mu: &mu} + m3 := &addToContainer{container: &container, value: 3, mu: &mu} m := Parallel(m1, m2, m3) From 63617253bdd81f3250faa2d8bff2bd37384f0982 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 14 May 2024 16:00:48 +0530 Subject: [PATCH 180/286] Assert customer marshalling is implemented for resources (#1425) ## Changes This PR ensures every resource implements a custom marshaller / unmarshaller. This is required because we directly embed Go SDK structs. which implement custom marshalling overrides. Since the struct is embedded, the [customer marshalling overrides](https://pkg.go.dev/encoding/json#example-package-CustomMarshalJSON) are promoted to the top level. If the embedded struct itself is nil, then JSON marshal / unmarshal will panic because it tries to call `MarshalJSON` / `UnmarshalJSON` on a nil object. Fixing this issue at the Go SDK level does not seem possible. Discussed with @hectorcast-db. 
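A minimal, hypothetical reproduction of the panic described above (the real resource structs embed Go SDK types such as `jobs.JobSettings`; the names below are made up):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for a Go SDK struct with a custom value-receiver marshaller.
type settings struct {
	Name string `json:"name"`
}

func (s settings) MarshalJSON() ([]byte, error) {
	type plain settings // plain has no MarshalJSON, so this does not recurse
	return json.Marshal(plain(s))
}

// Stand-in for a resource that embeds the SDK struct by pointer.
// MarshalJSON is promoted, so resource satisfies json.Marshaler as well.
type resource struct {
	*settings
}

func main() {
	defer func() {
		// Without a custom marshaller on resource, marshalling the zero value
		// panics: the promoted value method is called through a nil *settings.
		fmt.Println("recovered:", recover())
	}()
	_, _ = json.Marshal(resource{})
}
```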
--- bundle/config/resources_test.go | 56 +++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/bundle/config/resources_test.go b/bundle/config/resources_test.go index 9c4104e4d..7415029b1 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -1,6 +1,8 @@ package config import ( + "encoding/json" + "reflect" "testing" "github.com/databricks/cli/bundle/config/paths" @@ -125,3 +127,57 @@ func TestVerifySafeMergeForRegisteredModels(t *testing.T) { err := r.VerifySafeMerge(&other) assert.ErrorContains(t, err, "multiple resources named bar (registered_model at bar.yml, registered_model at bar2.yml)") } + +// This test ensures that all resources have a custom marshaller and unmarshaller. +// This is required because DABs resources map to Databricks APIs, and they do so +// by embedding the corresponding Go SDK structs. +// +// Go SDK structs often implement custom marshalling and unmarshalling methods (based on the API specifics). +// If the Go SDK struct implements custom marshalling and unmarshalling and we do not +// for the resources at the top level, marshalling and unmarshalling operations will panic. +// Thus we will be overly cautious and ensure that all resources need a custom marshaller and unmarshaller. +// +// Why do we not assert this using an interface to assert MarshalJSON and UnmarshalJSON +// are implemented at the top level? +// If a method is implemented for an embedded struct, the top level struct will +// also have that method and satisfy the interface. This is why we cannot assert +// that the methods are implemented at the top level using an interface. +// +// Why don't we use reflection to assert that the methods are implemented at the +// top level? +// Same problem as above, the golang reflection package does not seem to provide +// a way to directly assert that MarshalJSON and UnmarshalJSON are implemented +// at the top level. +func TestCustomMarshallerIsImplemented(t *testing.T) { + r := Resources{} + rt := reflect.TypeOf(r) + + for i := 0; i < rt.NumField(); i++ { + field := rt.Field(i) + + // Fields in Resources are expected be of the form map[string]*resourceStruct + assert.Equal(t, field.Type.Kind(), reflect.Map, "Resource %s is not a map", field.Name) + kt := field.Type.Key() + assert.Equal(t, kt.Kind(), reflect.String, "Resource %s is not a map with string keys", field.Name) + vt := field.Type.Elem() + assert.Equal(t, vt.Kind(), reflect.Ptr, "Resource %s is not a map with pointer values", field.Name) + + // Marshalling a resourceStruct will panic if resourceStruct does not have a custom marshaller + // This is because resourceStruct embeds a Go SDK struct that implements + // a custom marshaller. + // Eg: resource.Job implements MarshalJSON + v := reflect.Zero(vt.Elem()).Interface() + assert.NotPanics(t, func() { + json.Marshal(v) + }, "Resource %s does not have a custom marshaller", field.Name) + + // Unmarshalling a *resourceStruct will panic if the resource does not have a custom unmarshaller + // This is because resourceStruct embeds a Go SDK struct that implements + // a custom unmarshaller. 
+ // Eg: *resource.Job implements UnmarshalJSON + v = reflect.New(vt.Elem()).Interface() + assert.NotPanics(t, func() { + json.Unmarshal([]byte("{}"), v) + }, "Resource %s does not have a custom unmarshaller", field.Name) + } +} From 5920da432007edb1d2c0109bf9bc4808215282f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 12:53:59 +0200 Subject: [PATCH 181/286] Bump golang.org/x/term from 0.19.0 to 0.20.0 (#1422) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.19.0 to 0.20.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.19.0&new-version=0.20.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 475f66f38..636fbf44a 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/mod v0.17.0 golang.org/x/oauth2 v0.20.0 golang.org/x/sync v0.7.0 - golang.org/x/term v0.19.0 + golang.org/x/term v0.20.0 golang.org/x/text v0.15.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 @@ -59,7 +59,7 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/net v0.23.0 // indirect - golang.org/x/sys v0.19.0 // indirect + golang.org/x/sys v0.20.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.169.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect diff --git a/go.sum b/go.sum index 78f7bbd91..3dd6b0cb7 100644 --- a/go.sum +++ b/go.sum @@ -206,10 +206,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= From a71929b94399fe36ed56beeeb0bd12b71f186196 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 14 May 2024 16:28:55 +0530 Subject: [PATCH 182/286] Add line about Docker installation to README.md (#1363) Co-authored-by: Pieter Noordhuis --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 83051ccf7..5f3b78b79 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,18 @@ See https://github.com/databricks/cli/releases for releases and [the docs pages](https://docs.databricks.com/dev-tools/cli/databricks-cli.html) for installation instructions. +------ +You can use the CLI via a Docker image by pulling the image from `ghcr.io`. You can find all available versions +at: https://github.com/databricks/cli/pkgs/container/cli. +``` +docker pull ghcr.io/databricks/cli:latest +``` + +Example of how to run the CLI using the Docker image. More documentation is available at https://docs.databricks.com/dev-tools/bundles/airgapped-environment.html. 
+``` +docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghcr.io/databricks/cli:latest current-user me +``` + ## Authentication This CLI follows the Databricks Unified Authentication principles. From 0a21428a4827a5dc2a26929b29f052dd6bbcb64c Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 14 May 2024 14:19:34 +0200 Subject: [PATCH 183/286] Upgrade to 1.43 terraform provider (#1429) ## Changes Upgrade to 1.43 terraform provider --- bundle/internal/tf/codegen/schema/version.go | 2 +- bundle/internal/tf/schema/root.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 30885d961..cf98e16e8 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.42.0" +const ProviderVersion = "1.43.0" diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 50d05daab..b1fed9424 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.42.0" +const ProviderVersion = "1.43.0" func NewRoot() *Root { return &Root{ From 2035516fde7f55c1cd424b9df5a76bfab5b7da4a Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Wed, 15 May 2024 14:41:44 +0200 Subject: [PATCH 184/286] Don't merge-in remote resources during deployments (#1432) ## Changes `check_running_resources` now pulls the remote state without modifying the bundle state, similar to how it did before. This avoids a problem when we fail to compute deployment metadata for a deleted job (which we shouldn't do in the first place). `deploy_then_remove_resources_test` now also deploys and deletes a job (in addition to a pipeline), which catches the error that this PR fixes.
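For context on the change described above: the running-resource check now reads resource IDs from the Terraform state loaded by `ParseResourcesState` rather than from the bundle configuration. The fragment below is a minimal, made-up example of what such a state file contains; the field layout follows the standard Terraform state format, while the resource name and ID are invented for illustration:

```json
{
  "version": 4,
  "resources": [
    {
      "mode": "managed",
      "type": "databricks_job",
      "name": "foo",
      "instances": [
        { "attributes": { "id": "123" } }
      ]
    }
  ]
}
```

Only `managed` entries of type `databricks_job` or `databricks_pipeline` are inspected, and each instance's `id` attribute is used to ask the workspace whether that job or pipeline currently has an active run.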
## Tests Unit and integ tests --- .../check_running_resources.go | 81 +++++++++++-------- .../check_running_resources_test.go | 45 ++++++++--- bundle/phases/deploy.go | 3 +- .../databricks_template_schema.json | 8 ++ .../template/bar.py | 2 + .../template/resources.yml.tmpl | 11 +++ .../deploy_then_remove_resources_test.go | 17 +++- 7 files changed, 117 insertions(+), 50 deletions(-) rename bundle/deploy/{ => terraform}/check_running_resources.go (60%) rename bundle/deploy/{ => terraform}/check_running_resources_test.go (75%) create mode 100644 internal/bundle/bundles/deploy_then_remove_resources/template/bar.py diff --git a/bundle/deploy/check_running_resources.go b/bundle/deploy/terraform/check_running_resources.go similarity index 60% rename from bundle/deploy/check_running_resources.go rename to bundle/deploy/terraform/check_running_resources.go index a2305cd75..737f773e5 100644 --- a/bundle/deploy/check_running_resources.go +++ b/bundle/deploy/terraform/check_running_resources.go @@ -1,4 +1,4 @@ -package deploy +package terraform import ( "context" @@ -6,11 +6,11 @@ import ( "strconv" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" + tfjson "github.com/hashicorp/terraform-json" "golang.org/x/sync/errgroup" ) @@ -34,8 +34,14 @@ func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) dia if !b.Config.Bundle.Deployment.FailOnActiveRuns { return nil } + + state, err := ParseResourcesState(ctx, b) + if err != nil && state == nil { + return diag.FromErr(err) + } + w := b.WorkspaceClient() - err := checkAnyResourceRunning(ctx, w, &b.Config.Resources) + err = checkAnyResourceRunning(ctx, w, state) if err != nil { return diag.FromErr(err) } @@ -46,43 +52,50 @@ func CheckRunningResource() *checkRunningResources { return &checkRunningResources{} } -func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, resources *config.Resources) error { - errs, errCtx := errgroup.WithContext(ctx) - - for _, job := range resources.Jobs { - id := job.ID - if id == "" { - continue - } - errs.Go(func() error { - isRunning, err := IsJobRunning(errCtx, w, id) - // If there's an error retrieving the job, we assume it's not running - if err != nil { - return err - } - if isRunning { - return &ErrResourceIsRunning{resourceType: "job", resourceId: id} - } - return nil - }) +func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, state *resourcesState) error { + if state == nil { + return nil } - for _, pipeline := range resources.Pipelines { - id := pipeline.ID - if id == "" { + errs, errCtx := errgroup.WithContext(ctx) + + for _, resource := range state.Resources { + if resource.Mode != tfjson.ManagedResourceMode { continue } - errs.Go(func() error { - isRunning, err := IsPipelineRunning(errCtx, w, id) - // If there's an error retrieving the pipeline, we assume it's not running - if err != nil { - return nil + for _, instance := range resource.Instances { + id := instance.Attributes.ID + if id == "" { + continue } - if isRunning { - return &ErrResourceIsRunning{resourceType: "pipeline", resourceId: id} + + switch resource.Type { + case "databricks_job": + errs.Go(func() error { + isRunning, err := IsJobRunning(errCtx, w, id) + // If there's an error retrieving the job, we assume it's not running + if err != nil { + return err 
+ } + if isRunning { + return &ErrResourceIsRunning{resourceType: "job", resourceId: id} + } + return nil + }) + case "databricks_pipeline": + errs.Go(func() error { + isRunning, err := IsPipelineRunning(errCtx, w, id) + // If there's an error retrieving the pipeline, we assume it's not running + if err != nil { + return nil + } + if isRunning { + return &ErrResourceIsRunning{resourceType: "pipeline", resourceId: id} + } + return nil + }) } - return nil - }) + } } return errs.Wait() diff --git a/bundle/deploy/check_running_resources_test.go b/bundle/deploy/terraform/check_running_resources_test.go similarity index 75% rename from bundle/deploy/check_running_resources_test.go rename to bundle/deploy/terraform/check_running_resources_test.go index d61c80fc4..a1bbbd37b 100644 --- a/bundle/deploy/check_running_resources_test.go +++ b/bundle/deploy/terraform/check_running_resources_test.go @@ -1,12 +1,10 @@ -package deploy +package terraform import ( "context" "errors" "testing" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -16,15 +14,22 @@ import ( func TestIsAnyResourceRunningWithEmptyState(t *testing.T) { mock := mocks.NewMockWorkspaceClient(t) - err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, &config.Resources{}) + err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, &resourcesState{}) require.NoError(t, err) } func TestIsAnyResourceRunningWithJob(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) - resources := &config.Resources{ - Jobs: map[string]*resources.Job{ - "job1": {ID: "123"}, + resources := &resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_job", + Mode: "managed", + Name: "job1", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "123"}}, + }, + }, }, } @@ -50,9 +55,16 @@ func TestIsAnyResourceRunningWithJob(t *testing.T) { func TestIsAnyResourceRunningWithPipeline(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) - resources := &config.Resources{ - Pipelines: map[string]*resources.Pipeline{ - "pipeline1": {ID: "123"}, + resources := &resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "pipeline1", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "123"}}, + }, + }, }, } @@ -79,9 +91,16 @@ func TestIsAnyResourceRunningWithPipeline(t *testing.T) { func TestIsAnyResourceRunningWithAPIFailure(t *testing.T) { m := mocks.NewMockWorkspaceClient(t) - resources := &config.Resources{ - Pipelines: map[string]*resources.Pipeline{ - "pipeline1": {ID: "123"}, + resources := &resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "pipeline1", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "123"}}, + }, + }, }, } diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 4fc4f6300..46c389189 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -36,8 +36,7 @@ func Deploy() bundle.Mutator { permissions.ApplyWorkspaceRootPermissions(), terraform.Interpolate(), terraform.Write(), - terraform.Load(), - deploy.CheckRunningResource(), + terraform.CheckRunningResource(), bundle.Defer( terraform.Apply(), bundle.Seq( diff --git 
a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json b/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json index 8fca7a7c4..f03ad1c2b 100644 --- a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json +++ b/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json @@ -3,6 +3,14 @@ "unique_id": { "type": "string", "description": "Unique ID for pipeline name" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" } } } diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py b/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py new file mode 100644 index 000000000..4914a7436 --- /dev/null +++ b/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("hello") diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl b/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl index e3a676770..f3be9aafd 100644 --- a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl +++ b/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl @@ -1,4 +1,15 @@ resources: + jobs: + foo: + name: test-bundle-job-{{.unique_id}} + tasks: + - task_key: my_notebook_task + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + notebook_task: + notebook_path: "./bar.py" pipelines: bar: name: test-bundle-pipeline-{{.unique_id}} diff --git a/internal/bundle/deploy_then_remove_resources_test.go b/internal/bundle/deploy_then_remove_resources_test.go index 72baf798c..66ec5c16a 100644 --- a/internal/bundle/deploy_then_remove_resources_test.go +++ b/internal/bundle/deploy_then_remove_resources_test.go @@ -5,7 +5,9 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -15,9 +17,12 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ - "unique_id": uniqueId, + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, }) require.NoError(t, err) @@ -31,6 +36,12 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { require.NoError(t, err) assert.Equal(t, pipeline.Name, pipelineName) + // assert job is created + jobName := "test-bundle-job-" + uniqueId + job, err := w.Jobs.GetBySettingsName(ctx, jobName) + require.NoError(t, err) + assert.Equal(t, job.Settings.Name, jobName) + // delete resources.yml err = os.Remove(filepath.Join(bundleRoot, "resources.yml")) require.NoError(t, err) @@ -43,6 +54,10 @@ func TestAccBundleDeployThenRemoveResources(t *testing.T) { _, err = w.Pipelines.GetByName(ctx, pipelineName) assert.ErrorContains(t, err, "does not exist") + // assert job is deleted + _, err = w.Jobs.GetBySettingsName(ctx, jobName) + assert.ErrorContains(t, err, "does not exist") + t.Cleanup(func() { err = destroyBundle(t, 
ctx, bundleRoot) require.NoError(t, err) From 216d2b058aa519b31592861db7af3b69cf993193 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 May 2024 11:04:58 +0200 Subject: [PATCH 185/286] Bump github.com/databricks/databricks-sdk-go from 0.39.0 to 0.40.1 (#1431) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.39.0 to 0.40.1.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.40.1

- Fixed codecov for repository (#909).
- Add traceparent header to enable distributed tracing. (#914).
- Log cancelled and failed requests (#919).

Dependency updates:

- Bump golang.org/x/net from 0.22.0 to 0.24.0 (#884).
- Bump golang.org/x/net from 0.17.0 to 0.23.0 in /examples/zerolog (#896).
- Bump golang.org/x/net from 0.21.0 to 0.23.0 in /examples/slog (#897).

v0.40.0

- Allow unlimited timeouts in retries (#904). By setting `RETRY_TIMEOUT_SECONDS` to a negative value, `WorkspaceClient` and `AccountClient` will retry retriable failures indefinitely. As a reminder, without setting this parameter, the default retry timeout is 5 minutes. A short usage sketch follows these notes.

API Changes:

... (truncated)
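As a brief usage sketch of the retry setting called out above (a hypothetical example, not taken from the SDK documentation; the `RETRY_TIMEOUT_SECONDS` attribute is assumed to correspond to the `RetryTimeoutSeconds` field on `databricks.Config` in Go):

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

func main() {
	// Hypothetical sketch: a negative retry timeout asks the SDK to keep
	// retrying retriable failures indefinitely instead of giving up after
	// the default five minutes.
	w, err := databricks.NewWorkspaceClient(&databricks.Config{
		RetryTimeoutSeconds: -1,
	})
	if err != nil {
		panic(err)
	}

	// Any API call now retries retriable failures without a deadline.
	me, err := w.CurrentUser.Me(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(me.UserName)
}
```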

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.39.0&new-version=0.40.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Pieter Noordhuis --- .codegen/_openapi_sha | 2 +- .gitattributes | 4 +- bundle/schema/docs/bundle_descriptions.json | 4 +- .../csp-enablement-account.go | 2 +- .../esm-enablement-account.go | 2 +- cmd/workspace/apps/apps.go | 558 +++++++++++++++--- cmd/workspace/apps/overrides.go | 58 -- .../compliance-security-profile.go} | 18 +- cmd/workspace/dashboards/dashboards.go | 1 + .../enhanced-security-monitoring.go} | 18 +- .../model-versions/model-versions.go | 1 + cmd/workspace/queries/queries.go | 1 + cmd/workspace/settings/settings.go | 8 +- go.mod | 6 +- go.sum | 12 +- 15 files changed, 508 insertions(+), 187 deletions(-) delete mode 100644 cmd/workspace/apps/overrides.go rename cmd/workspace/{csp-enablement/csp-enablement.go => compliance-security-profile/compliance-security-profile.go} (89%) rename cmd/workspace/{esm-enablement/esm-enablement.go => enhanced-security-monitoring/enhanced-security-monitoring.go} (89%) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 1f11c17bf..f07cf44e5 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -21f9f1482f9d0d15228da59f2cd9f0863d2a6d55 \ No newline at end of file +9bb7950fa3390afb97abaa552934bc0a2e069de5 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index f9aa02d18..fb42588a7 100755 --- a/.gitattributes +++ b/.gitattributes @@ -37,6 +37,7 @@ cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true +cmd/workspace/compliance-security-profile/compliance-security-profile.go linguist-generated=true cmd/workspace/connections/connections.go linguist-generated=true cmd/workspace/consumer-fulfillments/consumer-fulfillments.go linguist-generated=true cmd/workspace/consumer-installations/consumer-installations.go linguist-generated=true @@ -44,13 +45,12 @@ cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true -cmd/workspace/csp-enablement/csp-enablement.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true cmd/workspace/default-namespace/default-namespace.go linguist-generated=true -cmd/workspace/esm-enablement/esm-enablement.go linguist-generated=true +cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true cmd/workspace/experiments/experiments.go linguist-generated=true cmd/workspace/external-locations/external-locations.go linguist-generated=true cmd/workspace/functions/functions.go linguist-generated=true diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 01d37dd71..ba6fe8ce2 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -1582,7 +1582,7 @@ "description": "An optional timeout 
applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -4305,7 +4305,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", diff --git a/cmd/account/csp-enablement-account/csp-enablement-account.go b/cmd/account/csp-enablement-account/csp-enablement-account.go index 79819003b..d6fce9537 100755 --- a/cmd/account/csp-enablement-account/csp-enablement-account.go +++ b/cmd/account/csp-enablement-account/csp-enablement-account.go @@ -156,4 +156,4 @@ func newUpdate() *cobra.Command { return cmd } -// end service CSPEnablementAccount +// end service CspEnablementAccount diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go index a2e95ffe1..49c21eb48 100755 --- a/cmd/account/esm-enablement-account/esm-enablement-account.go +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -157,4 +157,4 @@ func newUpdate() *cobra.Command { return cmd } -// end service ESMEnablementAccount +// end service EsmEnablementAccount diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 1ea50e830..2ccd16c0c 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -4,6 +4,7 @@ package apps import ( "fmt" + "time" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" @@ -19,10 +20,10 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "apps", - Short: `Lakehouse Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, - Long: `Lakehouse Apps run directly on a customer’s Databricks instance, integrate - with their data, use and extend Databricks services, and enable users to - interact through single sign-on.`, + Short: `Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, + Long: `Apps run directly on a customer’s Databricks instance, integrate with their + data, use and extend Databricks services, 
and enable users to interact through + single sign-on.`, GroupID: "serving", Annotations: map[string]string{ "package": "serving", @@ -34,11 +35,15 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) - cmd.AddCommand(newDeleteApp()) - cmd.AddCommand(newGetApp()) - cmd.AddCommand(newGetAppDeploymentStatus()) - cmd.AddCommand(newGetApps()) - cmd.AddCommand(newGetEvents()) + cmd.AddCommand(newCreateDeployment()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetDeployment()) + cmd.AddCommand(newGetEnvironment()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListDeployments()) + cmd.AddCommand(newStop()) + cmd.AddCommand(newUpdate()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -54,28 +59,53 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createOverrides []func( *cobra.Command, - *serving.DeployAppRequest, + *serving.CreateAppRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq serving.DeployAppRequest + var createReq serving.CreateAppRequest var createJson flags.JsonFlag + var createSkipWait bool + var createTimeout time.Duration + + cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach IDLE state`) + cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach IDLE state`) // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: any: resources + cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `The description of the app.`) - cmd.Use = "create" - cmd.Short = `Create and deploy an application.` - cmd.Long = `Create and deploy an application. + cmd.Use = "create NAME" + cmd.Short = `Create an App.` + cmd.Long = `Create an App. - Creates and deploys an application.` + Creates a new app. + + Arguments: + NAME: The name of the app. The name must contain only lowercase alphanumeric + characters and hyphens and be between 2 and 30 characters long. It must be + unique within the workspace.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -86,15 +116,35 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + if !cmd.Flags().Changed("json") { + createReq.Name = args[0] } - response, err := w.Apps.Create(ctx, createReq) + wait, err := w.Apps.Create(ctx, createReq) if err != nil { return err } - return cmdio.Render(ctx, response) + if createSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *serving.App) { + if i.Status == nil { + return + } + status := i.Status.State + statusMessage := fmt.Sprintf("current status: %s", status) + if i.Status != nil { + statusMessage = i.Status.Message + } + spinner <- statusMessage + }).GetWithTimeout(createTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) } // Disable completions since they are not applicable. @@ -109,30 +159,137 @@ func newCreate() *cobra.Command { return cmd } -// start delete-app command +// start create-deployment command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var deleteAppOverrides []func( +var createDeploymentOverrides []func( + *cobra.Command, + *serving.CreateAppDeploymentRequest, +) + +func newCreateDeployment() *cobra.Command { + cmd := &cobra.Command{} + + var createDeploymentReq serving.CreateAppDeploymentRequest + var createDeploymentJson flags.JsonFlag + + var createDeploymentSkipWait bool + var createDeploymentTimeout time.Duration + + cmd.Flags().BoolVar(&createDeploymentSkipWait, "no-wait", createDeploymentSkipWait, `do not wait to reach SUCCEEDED state`) + cmd.Flags().DurationVar(&createDeploymentTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`) + // TODO: short flags + cmd.Flags().Var(&createDeploymentJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-deployment APP_NAME SOURCE_CODE_PATH" + cmd.Short = `Create an App Deployment.` + cmd.Long = `Create an App Deployment. + + Creates an app deployment for the app with the supplied name. + + Arguments: + APP_NAME: The name of the app. + SOURCE_CODE_PATH: The source code path of the deployment.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. 
Provide 'source_code_path' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createDeploymentJson.Unmarshal(&createDeploymentReq) + if err != nil { + return err + } + } + createDeploymentReq.AppName = args[0] + if !cmd.Flags().Changed("json") { + createDeploymentReq.SourceCodePath = args[1] + } + + wait, err := w.Apps.CreateDeployment(ctx, createDeploymentReq) + if err != nil { + return err + } + if createDeploymentSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *serving.AppDeployment) { + if i.Status == nil { + return + } + status := i.Status.State + statusMessage := fmt.Sprintf("current status: %s", status) + if i.Status != nil { + statusMessage = i.Status.Message + } + spinner <- statusMessage + }).GetWithTimeout(createDeploymentTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createDeploymentOverrides { + fn(cmd, &createDeploymentReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( *cobra.Command, *serving.DeleteAppRequest, ) -func newDeleteApp() *cobra.Command { +func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteAppReq serving.DeleteAppRequest + var deleteReq serving.DeleteAppRequest // TODO: short flags - cmd.Use = "delete-app NAME" - cmd.Short = `Delete an application.` - cmd.Long = `Delete an application. + cmd.Use = "delete NAME" + cmd.Short = `Delete an App.` + cmd.Long = `Delete an App. - Delete an application definition + Deletes an app. Arguments: - NAME: The name of an application. This field is required.` + NAME: The name of the app.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true cmd.Annotations = make(map[string]string) @@ -146,13 +303,13 @@ func newDeleteApp() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - deleteAppReq.Name = args[0] + deleteReq.Name = args[0] - response, err := w.Apps.DeleteApp(ctx, deleteAppReq) + err = w.Apps.Delete(ctx, deleteReq) if err != nil { return err } - return cmdio.Render(ctx, response) + return nil } // Disable completions since they are not applicable. @@ -160,37 +317,40 @@ func newDeleteApp() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range deleteAppOverrides { - fn(cmd, &deleteAppReq) + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) } return cmd } -// start get-app command +// start get command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. 
-var getAppOverrides []func( +var getOverrides []func( *cobra.Command, *serving.GetAppRequest, ) -func newGetApp() *cobra.Command { +func newGet() *cobra.Command { cmd := &cobra.Command{} - var getAppReq serving.GetAppRequest + var getReq serving.GetAppRequest // TODO: short flags - cmd.Use = "get-app NAME" - cmd.Short = `Get definition for an application.` - cmd.Long = `Get definition for an application. + cmd.Use = "get NAME" + cmd.Short = `Get an App.` + cmd.Long = `Get an App. - Get an application definition + Retrieves information for the app with the supplied name. Arguments: - NAME: The name of an application. This field is required.` + NAME: The name of the app.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true cmd.Annotations = make(map[string]string) @@ -204,9 +364,9 @@ func newGetApp() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getAppReq.Name = args[0] + getReq.Name = args[0] - response, err := w.Apps.GetApp(ctx, getAppReq) + response, err := w.Apps.Get(ctx, getReq) if err != nil { return err } @@ -218,39 +378,104 @@ func newGetApp() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range getAppOverrides { - fn(cmd, &getAppReq) + for _, fn := range getOverrides { + fn(cmd, &getReq) } return cmd } -// start get-app-deployment-status command +// start get-deployment command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var getAppDeploymentStatusOverrides []func( +var getDeploymentOverrides []func( *cobra.Command, - *serving.GetAppDeploymentStatusRequest, + *serving.GetAppDeploymentRequest, ) -func newGetAppDeploymentStatus() *cobra.Command { +func newGetDeployment() *cobra.Command { cmd := &cobra.Command{} - var getAppDeploymentStatusReq serving.GetAppDeploymentStatusRequest + var getDeploymentReq serving.GetAppDeploymentRequest // TODO: short flags - cmd.Flags().StringVar(&getAppDeploymentStatusReq.IncludeAppLog, "include-app-log", getAppDeploymentStatusReq.IncludeAppLog, `Boolean flag to include application logs.`) - - cmd.Use = "get-app-deployment-status DEPLOYMENT_ID" - cmd.Short = `Get deployment status for an application.` - cmd.Long = `Get deployment status for an application. + cmd.Use = "get-deployment APP_NAME DEPLOYMENT_ID" + cmd.Short = `Get an App Deployment.` + cmd.Long = `Get an App Deployment. - Get deployment status for an application + Retrieves information for the app deployment with the supplied name and + deployment id. Arguments: - DEPLOYMENT_ID: The deployment id for an application. This field is required.` + APP_NAME: The name of the app. + DEPLOYMENT_ID: The unique id of the deployment.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getDeploymentReq.AppName = args[0] + getDeploymentReq.DeploymentId = args[1] + + response, err := w.Apps.GetDeployment(ctx, getDeploymentReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getDeploymentOverrides { + fn(cmd, &getDeploymentReq) + } + + return cmd +} + +// start get-environment command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getEnvironmentOverrides []func( + *cobra.Command, + *serving.GetAppEnvironmentRequest, +) + +func newGetEnvironment() *cobra.Command { + cmd := &cobra.Command{} + + var getEnvironmentReq serving.GetAppEnvironmentRequest + + // TODO: short flags + + cmd.Use = "get-environment NAME" + cmd.Short = `Get App Environment.` + cmd.Long = `Get App Environment. + + Retrieves app environment. + + Arguments: + NAME: The name of the app.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true cmd.Annotations = make(map[string]string) @@ -264,9 +489,9 @@ func newGetAppDeploymentStatus() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getAppDeploymentStatusReq.DeploymentId = args[0] + getEnvironmentReq.Name = args[0] - response, err := w.Apps.GetAppDeploymentStatus(ctx, getAppDeploymentStatusReq) + response, err := w.Apps.GetEnvironment(ctx, getEnvironmentReq) if err != nil { return err } @@ -278,41 +503,55 @@ func newGetAppDeploymentStatus() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range getAppDeploymentStatusOverrides { - fn(cmd, &getAppDeploymentStatusReq) + for _, fn := range getEnvironmentOverrides { + fn(cmd, &getEnvironmentReq) } return cmd } -// start get-apps command +// start list command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var getAppsOverrides []func( +var listOverrides []func( *cobra.Command, + *serving.ListAppsRequest, ) -func newGetApps() *cobra.Command { +func newList() *cobra.Command { cmd := &cobra.Command{} - cmd.Use = "get-apps" - cmd.Short = `List all applications.` - cmd.Long = `List all applications. + var listReq serving.ListAppsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Pagination token to go to the next page of apps.`) + + cmd.Use = "list" + cmd.Short = `List Apps.` + cmd.Long = `List Apps. - List all available applications` + Lists all apps in the workspace.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Apps.GetApps(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + + response := w.Apps.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -320,37 +559,43 @@ func newGetApps() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. 
- for _, fn := range getAppsOverrides { - fn(cmd) + for _, fn := range listOverrides { + fn(cmd, &listReq) } return cmd } -// start get-events command +// start list-deployments command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var getEventsOverrides []func( +var listDeploymentsOverrides []func( *cobra.Command, - *serving.GetEventsRequest, + *serving.ListAppDeploymentsRequest, ) -func newGetEvents() *cobra.Command { +func newListDeployments() *cobra.Command { cmd := &cobra.Command{} - var getEventsReq serving.GetEventsRequest + var listDeploymentsReq serving.ListAppDeploymentsRequest // TODO: short flags - cmd.Use = "get-events NAME" - cmd.Short = `Get deployment events for an application.` - cmd.Long = `Get deployment events for an application. + cmd.Flags().IntVar(&listDeploymentsReq.PageSize, "page-size", listDeploymentsReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listDeploymentsReq.PageToken, "page-token", listDeploymentsReq.PageToken, `Pagination token to go to the next page of apps.`) + + cmd.Use = "list-deployments APP_NAME" + cmd.Short = `List App Deployments.` + cmd.Long = `List App Deployments. - Get deployment events for an application + Lists all app deployments for the app with the supplied name. Arguments: - NAME: The name of an application. This field is required.` + APP_NAME: The name of the app.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true cmd.Annotations = make(map[string]string) @@ -364,9 +609,140 @@ func newGetEvents() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getEventsReq.Name = args[0] + listDeploymentsReq.AppName = args[0] - response, err := w.Apps.GetEvents(ctx, getEventsReq) + response := w.Apps.ListDeployments(ctx, listDeploymentsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listDeploymentsOverrides { + fn(cmd, &listDeploymentsReq) + } + + return cmd +} + +// start stop command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var stopOverrides []func( + *cobra.Command, + *serving.StopAppRequest, +) + +func newStop() *cobra.Command { + cmd := &cobra.Command{} + + var stopReq serving.StopAppRequest + + // TODO: short flags + + cmd.Use = "stop NAME" + cmd.Short = `Stop an App.` + cmd.Long = `Stop an App. + + Stops the active deployment of the app in the workspace. + + Arguments: + NAME: The name of the app.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + stopReq.Name = args[0] + + err = w.Apps.Stop(ctx, stopReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range stopOverrides { + fn(cmd, &stopReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *serving.UpdateAppRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq serving.UpdateAppRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `The description of the app.`) + + cmd.Use = "update NAME" + cmd.Short = `Update an App.` + cmd.Long = `Update an App. + + Updates the app with the supplied name. + + Arguments: + NAME: The name of the app. The name must contain only lowercase alphanumeric + characters and hyphens and be between 2 and 30 characters long. It must be + unique within the workspace.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.Name = args[0] + + response, err := w.Apps.Update(ctx, updateReq) if err != nil { return err } @@ -378,8 +754,8 @@ func newGetEvents() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. 
- for _, fn := range getEventsOverrides { - fn(cmd, &getEventsReq) + for _, fn := range updateOverrides { + fn(cmd, &updateReq) } return cmd diff --git a/cmd/workspace/apps/overrides.go b/cmd/workspace/apps/overrides.go deleted file mode 100644 index e38e139b5..000000000 --- a/cmd/workspace/apps/overrides.go +++ /dev/null @@ -1,58 +0,0 @@ -package apps - -import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/serving" - "github.com/spf13/cobra" -) - -func createOverride(cmd *cobra.Command, deployReq *serving.DeployAppRequest) { - var manifestYaml flags.YamlFlag - var resourcesYaml flags.YamlFlag - createJson := cmd.Flag("json").Value.(*flags.JsonFlag) - - // TODO: short flags - cmd.Flags().Var(&manifestYaml, "manifest", `either inline YAML string or @path/to/manifest.yaml`) - cmd.Flags().Var(&resourcesYaml, "resources", `either inline YAML string or @path/to/resources.yaml`) - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&deployReq) - if err != nil { - return err - } - } else if cmd.Flags().Changed("manifest") { - err = manifestYaml.Unmarshal(&deployReq.Manifest) - if err != nil { - return err - } - if cmd.Flags().Changed("resources") { - err = resourcesYaml.Unmarshal(&deployReq.Resources) - if err != nil { - return err - } - } - } else { - return fmt.Errorf("please provide command input in YAML format by specifying the --manifest flag or provide a json payload using the --json flag") - } - response, err := w.Apps.Create(ctx, *deployReq) - if err != nil { - return err - } - - return cmdio.Render(ctx, response) - } -} - -func init() { - createOverrides = append(createOverrides, createOverride) -} diff --git a/cmd/workspace/csp-enablement/csp-enablement.go b/cmd/workspace/compliance-security-profile/compliance-security-profile.go similarity index 89% rename from cmd/workspace/csp-enablement/csp-enablement.go rename to cmd/workspace/compliance-security-profile/compliance-security-profile.go index e82fdc2a4..efafb4627 100755 --- a/cmd/workspace/csp-enablement/csp-enablement.go +++ b/cmd/workspace/compliance-security-profile/compliance-security-profile.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package csp_enablement +package compliance_security_profile import ( "fmt" @@ -18,7 +18,7 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "csp-enablement", + Use: "compliance-security-profile", Short: `Controls whether to enable the compliance security profile for the current workspace.`, Long: `Controls whether to enable the compliance security profile for the current workspace. Enabling it on a workspace is permanent. By default, it is turned @@ -48,13 +48,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var getOverrides []func( *cobra.Command, - *settings.GetCspEnablementSettingRequest, + *settings.GetComplianceSecurityProfileSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetCspEnablementSettingRequest + var getReq settings.GetComplianceSecurityProfileSettingRequest // TODO: short flags @@ -78,7 +78,7 @@ func newGet() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Settings.CspEnablement().Get(ctx, getReq) + response, err := w.Settings.ComplianceSecurityProfile().Get(ctx, getReq) if err != nil { return err } @@ -103,13 +103,13 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateOverrides []func( *cobra.Command, - *settings.UpdateCspEnablementSettingRequest, + *settings.UpdateComplianceSecurityProfileSettingRequest, ) func newUpdate() *cobra.Command { cmd := &cobra.Command{} - var updateReq settings.UpdateCspEnablementSettingRequest + var updateReq settings.UpdateComplianceSecurityProfileSettingRequest var updateJson flags.JsonFlag // TODO: short flags @@ -141,7 +141,7 @@ func newUpdate() *cobra.Command { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } - response, err := w.Settings.CspEnablement().Update(ctx, updateReq) + response, err := w.Settings.ComplianceSecurityProfile().Update(ctx, updateReq) if err != nil { return err } @@ -160,4 +160,4 @@ func newUpdate() *cobra.Command { return cmd } -// end service CSPEnablement +// end service ComplianceSecurityProfile diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 0500ebecf..1a143538b 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -386,6 +386,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this dashboard that appears in list views and at the top of the dashboard page.`) cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`) + // TODO: array: tags cmd.Use = "update DASHBOARD_ID" cmd.Short = `Change a dashboard definition.` diff --git a/cmd/workspace/esm-enablement/esm-enablement.go b/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go similarity index 89% rename from cmd/workspace/esm-enablement/esm-enablement.go rename to cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go index 784c01f21..86b4244d5 100755 --- a/cmd/workspace/esm-enablement/esm-enablement.go +++ b/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package esm_enablement +package enhanced_security_monitoring import ( "fmt" @@ -18,7 +18,7 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "esm-enablement", + Use: "enhanced-security-monitoring", Short: `Controls whether enhanced security monitoring is enabled for the current workspace.`, Long: `Controls whether enhanced security monitoring is enabled for the current workspace. If the compliance security profile is enabled, this is @@ -50,13 +50,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var getOverrides []func( *cobra.Command, - *settings.GetEsmEnablementSettingRequest, + *settings.GetEnhancedSecurityMonitoringSettingRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq settings.GetEsmEnablementSettingRequest + var getReq settings.GetEnhancedSecurityMonitoringSettingRequest // TODO: short flags @@ -80,7 +80,7 @@ func newGet() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Settings.EsmEnablement().Get(ctx, getReq) + response, err := w.Settings.EnhancedSecurityMonitoring().Get(ctx, getReq) if err != nil { return err } @@ -105,13 +105,13 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateOverrides []func( *cobra.Command, - *settings.UpdateEsmEnablementSettingRequest, + *settings.UpdateEnhancedSecurityMonitoringSettingRequest, ) func newUpdate() *cobra.Command { cmd := &cobra.Command{} - var updateReq settings.UpdateEsmEnablementSettingRequest + var updateReq settings.UpdateEnhancedSecurityMonitoringSettingRequest var updateJson flags.JsonFlag // TODO: short flags @@ -143,7 +143,7 @@ func newUpdate() *cobra.Command { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } - response, err := w.Settings.EsmEnablement().Update(ctx, updateReq) + response, err := w.Settings.EnhancedSecurityMonitoring().Update(ctx, updateReq) if err != nil { return err } @@ -162,4 +162,4 @@ func newUpdate() *cobra.Command { return cmd } -// end service ESMEnablement +// end service EnhancedSecurityMonitoring diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index 7b556c724..034cea2df 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -288,6 +288,7 @@ func newList() *cobra.Command { schema. There is no guarantee of a specific ordering of the elements in the response. + The elements in the response will not contain any aliases or tags. Arguments: FULL_NAME: The full three-level name of the registered model under which to list diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index 0126097fc..b96eb7154 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -401,6 +401,7 @@ func newUpdate() *cobra.Command { // TODO: any: options cmd.Flags().StringVar(&updateReq.Query, "query", updateReq.Query, `The text of the query to be run.`) cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. 
Supported values: [owner, viewer]`) + // TODO: array: tags cmd.Use = "update QUERY_ID" cmd.Short = `Change a query definition.` diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 38e19e839..214986c76 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -6,9 +6,9 @@ import ( "github.com/spf13/cobra" automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update" - csp_enablement "github.com/databricks/cli/cmd/workspace/csp-enablement" + compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile" default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace" - esm_enablement "github.com/databricks/cli/cmd/workspace/esm-enablement" + enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring" restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins" ) @@ -29,9 +29,9 @@ func New() *cobra.Command { // Add subservices cmd.AddCommand(automatic_cluster_update.New()) - cmd.AddCommand(csp_enablement.New()) + cmd.AddCommand(compliance_security_profile.New()) cmd.AddCommand(default_namespace.New()) - cmd.AddCommand(esm_enablement.New()) + cmd.AddCommand(enhanced_security_monitoring.New()) cmd.AddCommand(restrict_workspace_admins.New()) // Apply optional overrides to this command. diff --git a/go.mod b/go.mod index 636fbf44a..6c8e845a5 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.39.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.40.1 // Apache 2.0 github.com/fatih/color v1.16.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause @@ -57,8 +57,8 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.169.0 // indirect diff --git a/go.sum b/go.sum index 3dd6b0cb7..222ce1e4c 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.39.0 h1:nVnQYkk47SkEsRSXWkn6j7jBOxXgusjoo6xwbaHTGss= -github.com/databricks/databricks-sdk-go v0.39.0/go.mod h1:Yjy1gREDLK65g4axpVbVNKYAHYE2Sqzj0AB9QWHCBVM= +github.com/databricks/databricks-sdk-go v0.40.1 h1:rE5yP9gIW2oap+6CnumixnZSDIsXwVojAuDBuKUl5GU= +github.com/databricks/databricks-sdk-go v0.40.1/go.mod h1:rLIhh7DvifVLmf2QxMr/vMRGqdrTZazn8VYo4LilfCo= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -170,8 +170,8 @@ go.opentelemetry.io/otel/trace v1.24.0 
h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -186,8 +186,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= From 157877a152cf157d4a40244b5ecadbc2485e9539 Mon Sep 17 00:00:00 2001 From: Ilia Babanov Date: Thu, 16 May 2024 11:32:55 +0200 Subject: [PATCH 186/286] Fix bundle destroy integration test (#1435) I've updated the `deploy_then_remove_resources` test template in the previous PR, but didn't notice that it was used in the destroy test too. 
Now the destroy test also checks deletion of jobs --- internal/bundle/destroy_test.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/internal/bundle/destroy_test.go b/internal/bundle/destroy_test.go index 43c05fbae..baccf4e6f 100644 --- a/internal/bundle/destroy_test.go +++ b/internal/bundle/destroy_test.go @@ -6,7 +6,9 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/apierr" "github.com/google/uuid" "github.com/stretchr/testify/assert" @@ -17,9 +19,12 @@ func TestAccBundleDestroy(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) w := wt.W + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) uniqueId := uuid.New().String() bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ - "unique_id": uniqueId, + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, }) require.NoError(t, err) @@ -29,7 +34,7 @@ func TestAccBundleDestroy(t *testing.T) { _, err = os.ReadDir(snapshotsDir) assert.ErrorIs(t, err, os.ErrNotExist) - // deploy pipeline + // deploy resources err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) @@ -49,6 +54,12 @@ func TestAccBundleDestroy(t *testing.T) { require.NoError(t, err) assert.Equal(t, pipeline.Name, pipelineName) + // assert job is created + jobName := "test-bundle-job-" + uniqueId + job, err := w.Jobs.GetBySettingsName(ctx, jobName) + require.NoError(t, err) + assert.Equal(t, job.Settings.Name, jobName) + // destroy bundle err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) @@ -57,6 +68,10 @@ func TestAccBundleDestroy(t *testing.T) { _, err = w.Pipelines.GetByName(ctx, pipelineName) assert.ErrorContains(t, err, "does not exist") + // assert job is deleted + _, err = w.Jobs.GetBySettingsName(ctx, jobName) + assert.ErrorContains(t, err, "does not exist") + // Assert snapshot file is deleted entries, err = os.ReadDir(snapshotsDir) require.NoError(t, err) From f7d4b272f40b384061cd2a52bab2ef943e3f9578 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 16 May 2024 12:22:09 +0200 Subject: [PATCH 187/286] Improve token refresh flow (#1434) ## Changes Currently, there are a number of issues with the non-happy-path flows for token refresh in the CLI. If the token refresh fails, the raw error message is presented to the user, as seen below. This message is very difficult for users to interpret and doesn't give any clear direction on how to resolve this issue. ``` Error: token refresh: Post "https://adb-.azuredatabricks.net/oidc/v1/token": http 400: {"error":"invalid_request","error_description":"Refresh token is invalid"} ``` When logging in again, I've noticed that the timeout for logging in is very short, only 45 seconds. If a user is using a password manager and needs to log in to that first, or needs to do MFA, 45 seconds may not be enough time. Additionally, when logging in to an account-level profile, it is quite frustrating for users to need to re-enter account ID information when that information is already stored in the user's `.databrickscfg` file. This PR tackles these issues. First, the presentation of error messages from `databricks auth token` is improved substantially by converting the `error` into a human-readable message. When the refresh token is invalid, it will present a command for the user to run to reauthenticate. 
If the token fetching failed for some other reason, that reason will be presented in a nice way, providing front-line debugging steps and ultimately redirecting users to file a ticket at this repo if they can't resolve the issue themselves. After this PR, the new error message is: ``` Error: a new access token could not be retrieved because the refresh token is invalid. To reauthenticate, run `.databricks/databricks auth login --host https://adb-.azuredatabricks.net` ``` To improve the login flow, this PR modifies `databricks auth login` to auto-complete the account ID from the profile when present. Additionally, it increases the login timeout from 45 seconds to 1 hour to give the user sufficient time to login as needed. To test this change, I needed to refactor some components of the CLI around profile management, the token cache, and the API client used to fetch OAuth tokens. These are now settable in the context, and a demonstration of how they can be set and used is found in `auth_test.go`. Separately, this also demonstrates a sort-of integration test of the CLI by executing the Cobra command for `databricks auth token` from tests, which may be useful for testing other end-to-end functionality in the CLI. In particular, I believe this is necessary in order to set flag values (like the `--profile` flag in this case) for use in testing. ## Tests Unit tests cover the unhappy and happy paths using the mocked API client, token cache, and profiler. Manually tested --------- Co-authored-by: Pieter Noordhuis --- bundle/tests/environment_git_test.go | 8 +- bundle/tests/git_test.go | 8 +- cmd/auth/env.go | 4 +- cmd/auth/login.go | 41 +++-- cmd/auth/login_test.go | 2 +- cmd/auth/profiles.go | 4 +- cmd/auth/token.go | 55 +++++- cmd/auth/token_test.go | 168 ++++++++++++++++++ cmd/labs/project/installer.go | 4 +- cmd/root/auth.go | 28 +-- libs/auth/cache/cache.go | 104 ++--------- libs/auth/cache/file.go | 108 +++++++++++ .../cache/{cache_test.go => file_test.go} | 14 +- libs/auth/cache/in_memory.go | 26 +++ libs/auth/cache/in_memory_test.go | 44 +++++ libs/auth/oauth.go | 31 ++-- libs/databrickscfg/loader_test.go | 10 +- libs/databrickscfg/ops_test.go | 14 +- libs/databrickscfg/profile/context.go | 17 ++ libs/databrickscfg/profile/file.go | 100 +++++++++++ .../file_test.go} | 20 ++- libs/databrickscfg/profile/in_memory.go | 25 +++ libs/databrickscfg/profile/profile.go | 49 +++++ libs/databrickscfg/profile/profiler.go | 32 ++++ .../{ => profile}/testdata/badcfg | 0 .../{ => profile}/testdata/databrickscfg | 0 .../testdata/sample-home/.databrickscfg | 0 libs/databrickscfg/profiles.go | 150 ---------------- 28 files changed, 743 insertions(+), 323 deletions(-) create mode 100644 cmd/auth/token_test.go create mode 100644 libs/auth/cache/file.go rename libs/auth/cache/{cache_test.go => file_test.go} (93%) create mode 100644 libs/auth/cache/in_memory.go create mode 100644 libs/auth/cache/in_memory_test.go create mode 100644 libs/databrickscfg/profile/context.go create mode 100644 libs/databrickscfg/profile/file.go rename libs/databrickscfg/{profiles_test.go => profile/file_test.go} (82%) create mode 100644 libs/databrickscfg/profile/in_memory.go create mode 100644 libs/databrickscfg/profile/profile.go create mode 100644 libs/databrickscfg/profile/profiler.go rename libs/databrickscfg/{ => profile}/testdata/badcfg (100%) rename libs/databrickscfg/{ => profile}/testdata/databrickscfg (100%) rename libs/databrickscfg/{ => profile}/testdata/sample-home/.databrickscfg (100%) delete mode 100644 
libs/databrickscfg/profiles.go diff --git a/bundle/tests/environment_git_test.go b/bundle/tests/environment_git_test.go index bb10825e4..ad4aec2e6 100644 --- a/bundle/tests/environment_git_test.go +++ b/bundle/tests/environment_git_test.go @@ -1,6 +1,8 @@ package config_tests import ( + "fmt" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -9,12 +11,14 @@ import ( func TestGitAutoLoadWithEnvironment(t *testing.T) { b := load(t, "./environments_autoload_git") assert.True(t, b.Config.Bundle.Git.Inferred) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") + validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") + assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) } func TestGitManuallySetBranchWithEnvironment(t *testing.T) { b := loadTarget(t, "./environments_autoload_git", "production") assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") + validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") + assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) } diff --git a/bundle/tests/git_test.go b/bundle/tests/git_test.go index b33ffc211..21eaaedd2 100644 --- a/bundle/tests/git_test.go +++ b/bundle/tests/git_test.go @@ -2,6 +2,8 @@ package config_tests import ( "context" + "fmt" + "strings" "testing" "github.com/databricks/cli/bundle" @@ -13,14 +15,16 @@ import ( func TestGitAutoLoad(t *testing.T) { b := load(t, "./autoload_git") assert.True(t, b.Config.Bundle.Git.Inferred) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") + validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") + assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) } func TestGitManuallySetBranch(t *testing.T) { b := loadTarget(t, "./autoload_git", "production") assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") + validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") + assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) } func TestGitBundleBranchValidation(t *testing.T) { diff --git a/cmd/auth/env.go b/cmd/auth/env.go index 04aef36a8..e72d15399 100644 --- a/cmd/auth/env.go +++ b/cmd/auth/env.go @@ -10,7 +10,7 @@ import ( "net/url" "strings" - "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" "gopkg.in/ini.v1" @@ -70,7 +70,7 @@ func resolveSection(cfg *config.Config, iniFile *config.File) (*ini.Section, err } func loadFromDatabricksCfg(ctx context.Context, cfg *config.Config) error { - iniFile, err := databrickscfg.Get(ctx) + iniFile, err := profile.DefaultProfiler.Get(ctx) if errors.Is(err, fs.ErrNotExist) { // it's fine not to have ~/.databrickscfg return nil diff --git a/cmd/auth/login.go b/cmd/auth/login.go index c033054b8..11cba8e5f 100644 --- a/cmd/auth/login.go +++ 
b/cmd/auth/login.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/cli/libs/databrickscfg/cfgpickers" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" @@ -31,6 +32,7 @@ func configureHost(ctx context.Context, persistentAuth *auth.PersistentAuth, arg } const minimalDbConnectVersion = "13.1" +const defaultTimeout = 1 * time.Hour func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { defaultConfigPath := "~/.databrickscfg" @@ -84,7 +86,7 @@ depends on the existing profiles you have set in your configuration file var loginTimeout time.Duration var configureCluster bool - cmd.Flags().DurationVar(&loginTimeout, "timeout", auth.DefaultTimeout, + cmd.Flags().DurationVar(&loginTimeout, "timeout", defaultTimeout, "Timeout for completing login challenge in the browser") cmd.Flags().BoolVar(&configureCluster, "configure-cluster", false, "Prompts to configure cluster") @@ -108,7 +110,7 @@ depends on the existing profiles you have set in your configuration file profileName = profile } - err := setHost(ctx, profileName, persistentAuth, args) + err := setHostAndAccountId(ctx, profileName, persistentAuth, args) if err != nil { return err } @@ -117,17 +119,10 @@ depends on the existing profiles you have set in your configuration file // We need the config without the profile before it's used to initialise new workspace client below. // Otherwise it will complain about non existing profile because it was not yet saved. cfg := config.Config{ - Host: persistentAuth.Host, - AuthType: "databricks-cli", + Host: persistentAuth.Host, + AccountID: persistentAuth.AccountID, + AuthType: "databricks-cli", } - if cfg.IsAccountClient() && persistentAuth.AccountID == "" { - accountId, err := promptForAccountID(ctx) - if err != nil { - return err - } - persistentAuth.AccountID = accountId - } - cfg.AccountID = persistentAuth.AccountID ctx, cancel := context.WithTimeout(ctx, loginTimeout) defer cancel() @@ -172,15 +167,15 @@ depends on the existing profiles you have set in your configuration file return cmd } -func setHost(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { +func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { + profiler := profile.GetProfiler(ctx) // If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile. - _, profiles, err := databrickscfg.LoadProfiles(ctx, func(p databrickscfg.Profile) bool { - return p.Name == profileName - }) + profiles, err := profiler.LoadProfiles(ctx, profile.WithName(profileName)) // Tolerate ErrNoConfiguration here, as we will write out a configuration as part of the login flow. 
- if err != nil && !errors.Is(err, databrickscfg.ErrNoConfiguration) { + if err != nil && !errors.Is(err, profile.ErrNoConfiguration) { return err } + if persistentAuth.Host == "" { if len(profiles) > 0 && profiles[0].Host != "" { persistentAuth.Host = profiles[0].Host @@ -188,5 +183,17 @@ func setHost(ctx context.Context, profileName string, persistentAuth *auth.Persi configureHost(ctx, persistentAuth, args, 0) } } + isAccountClient := (&config.Config{Host: persistentAuth.Host}).IsAccountClient() + if isAccountClient && persistentAuth.AccountID == "" { + if len(profiles) > 0 && profiles[0].AccountID != "" { + persistentAuth.AccountID = profiles[0].AccountID + } else { + accountId, err := promptForAccountID(ctx) + if err != nil { + return err + } + persistentAuth.AccountID = accountId + } + } return nil } diff --git a/cmd/auth/login_test.go b/cmd/auth/login_test.go index 9b834bd0a..ce3ca5ae5 100644 --- a/cmd/auth/login_test.go +++ b/cmd/auth/login_test.go @@ -12,6 +12,6 @@ import ( func TestSetHostDoesNotFailWithNoDatabrickscfg(t *testing.T) { ctx := context.Background() ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./imaginary-file/databrickscfg") - err := setHost(ctx, "foo", &auth.PersistentAuth{Host: "test"}, []string{}) + err := setHostAndAccountId(ctx, "foo", &auth.PersistentAuth{Host: "test"}, []string{}) assert.NoError(t, err) } diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 797eb3b5f..61a6c1f33 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -8,7 +8,7 @@ import ( "time" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" @@ -94,7 +94,7 @@ func newProfilesCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { var profiles []*profileMetadata - iniFile, err := databrickscfg.Get(cmd.Context()) + iniFile, err := profile.DefaultProfiler.Get(cmd.Context()) if os.IsNotExist(err) { // return empty list for non-configured machines iniFile = &config.File{ diff --git a/cmd/auth/token.go b/cmd/auth/token.go index d763b9564..3f9af43fa 100644 --- a/cmd/auth/token.go +++ b/cmd/auth/token.go @@ -4,12 +4,44 @@ import ( "context" "encoding/json" "errors" + "fmt" + "os" + "strings" "time" "github.com/databricks/cli/libs/auth" + "github.com/databricks/databricks-sdk-go/httpclient" "github.com/spf13/cobra" ) +type tokenErrorResponse struct { + Error string `json:"error"` + ErrorDescription string `json:"error_description"` +} + +func buildLoginCommand(profile string, persistentAuth *auth.PersistentAuth) string { + executable := os.Args[0] + cmd := []string{ + executable, + "auth", + "login", + } + if profile != "" { + cmd = append(cmd, "--profile", profile) + } else { + cmd = append(cmd, "--host", persistentAuth.Host) + if persistentAuth.AccountID != "" { + cmd = append(cmd, "--account-id", persistentAuth.AccountID) + } + } + return strings.Join(cmd, " ") +} + +func helpfulError(profile string, persistentAuth *auth.PersistentAuth) string { + loginMsg := buildLoginCommand(profile, persistentAuth) + return fmt.Sprintf("Try logging in again with `%s` before retrying. 
If this fails, please report this issue to the Databricks CLI maintainers at https://github.com/databricks/cli/issues/new", loginMsg) +} + func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { cmd := &cobra.Command{ Use: "token [HOST]", @@ -17,7 +49,7 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { } var tokenTimeout time.Duration - cmd.Flags().DurationVar(&tokenTimeout, "timeout", auth.DefaultTimeout, + cmd.Flags().DurationVar(&tokenTimeout, "timeout", defaultTimeout, "Timeout for acquiring a token.") cmd.RunE = func(cmd *cobra.Command, args []string) error { @@ -29,11 +61,11 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { profileName = profileFlag.Value.String() // If a profile is provided we read the host from the .databrickscfg file if profileName != "" && len(args) > 0 { - return errors.New("providing both a profile and a host parameters is not supported") + return errors.New("providing both a profile and host is not supported") } } - err := setHost(ctx, profileName, persistentAuth, args) + err := setHostAndAccountId(ctx, profileName, persistentAuth, args) if err != nil { return err } @@ -42,8 +74,21 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { ctx, cancel := context.WithTimeout(ctx, tokenTimeout) defer cancel() t, err := persistentAuth.Load(ctx) - if err != nil { - return err + var httpErr *httpclient.HttpError + if errors.As(err, &httpErr) { + helpMsg := helpfulError(profileName, persistentAuth) + t := &tokenErrorResponse{} + err = json.Unmarshal([]byte(httpErr.Message), t) + if err != nil { + return fmt.Errorf("unexpected parsing token response: %w. %s", err, helpMsg) + } + if t.ErrorDescription == "Refresh token is invalid" { + return fmt.Errorf("a new access token could not be retrieved because the refresh token is invalid. To reauthenticate, run `%s`", buildLoginCommand(profileName, persistentAuth)) + } else { + return fmt.Errorf("unexpected error refreshing token: %s. %s", t.ErrorDescription, helpMsg) + } + } else if err != nil { + return fmt.Errorf("unexpected error refreshing token: %w. 
%s", err, helpfulError(profileName, persistentAuth)) } raw, err := json.MarshalIndent(t, "", " ") if err != nil { diff --git a/cmd/auth/token_test.go b/cmd/auth/token_test.go new file mode 100644 index 000000000..df98cc151 --- /dev/null +++ b/cmd/auth/token_test.go @@ -0,0 +1,168 @@ +package auth_test + +import ( + "bytes" + "context" + "encoding/json" + "testing" + "time" + + "github.com/databricks/cli/cmd" + "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/auth/cache" + "github.com/databricks/cli/libs/databrickscfg/profile" + "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/httpclient/fixtures" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "golang.org/x/oauth2" +) + +var refreshFailureTokenResponse = fixtures.HTTPFixture{ + MatchAny: true, + Status: 401, + Response: map[string]string{ + "error": "invalid_request", + "error_description": "Refresh token is invalid", + }, +} + +var refreshFailureInvalidResponse = fixtures.HTTPFixture{ + MatchAny: true, + Status: 401, + Response: "Not json", +} + +var refreshFailureOtherError = fixtures.HTTPFixture{ + MatchAny: true, + Status: 401, + Response: map[string]string{ + "error": "other_error", + "error_description": "Databricks is down", + }, +} + +var refreshSuccessTokenResponse = fixtures.HTTPFixture{ + MatchAny: true, + Status: 200, + Response: map[string]string{ + "access_token": "new-access-token", + "token_type": "Bearer", + "expires_in": "3600", + }, +} + +func validateToken(t *testing.T, resp string) { + res := map[string]string{} + err := json.Unmarshal([]byte(resp), &res) + assert.NoError(t, err) + assert.Equal(t, "new-access-token", res["access_token"]) + assert.Equal(t, "Bearer", res["token_type"]) +} + +func getContextForTest(f fixtures.HTTPFixture) context.Context { + profiler := profile.InMemoryProfiler{ + Profiles: profile.Profiles{ + { + Name: "expired", + Host: "https://accounts.cloud.databricks.com", + AccountID: "expired", + }, + { + Name: "active", + Host: "https://accounts.cloud.databricks.com", + AccountID: "active", + }, + }, + } + tokenCache := &cache.InMemoryTokenCache{ + Tokens: map[string]*oauth2.Token{ + "https://accounts.cloud.databricks.com/oidc/accounts/expired": { + RefreshToken: "expired", + }, + "https://accounts.cloud.databricks.com/oidc/accounts/active": { + RefreshToken: "active", + Expiry: time.Now().Add(1 * time.Hour), // Hopefully unit tests don't take an hour to run + }, + }, + } + client := httpclient.NewApiClient(httpclient.ClientConfig{ + Transport: fixtures.SliceTransport{f}, + }) + ctx := profile.WithProfiler(context.Background(), profiler) + ctx = cache.WithTokenCache(ctx, tokenCache) + ctx = auth.WithApiClientForOAuth(ctx, client) + return ctx +} + +func getCobraCmdForTest(f fixtures.HTTPFixture) (*cobra.Command, *bytes.Buffer) { + ctx := getContextForTest(f) + c := cmd.New(ctx) + output := &bytes.Buffer{} + c.SetOut(output) + return c, output +} + +func TestTokenCmdWithProfilePrintsHelpfulLoginMessageOnRefreshFailure(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshFailureTokenResponse) + cmd.SetArgs([]string{"auth", "token", "--profile", "expired"}) + err := cmd.Execute() + + out := output.String() + assert.Empty(t, out) + assert.ErrorContains(t, err, "a new access token could not be retrieved because the refresh token is invalid. 
To reauthenticate, run ") + assert.ErrorContains(t, err, "auth login --profile expired") +} + +func TestTokenCmdWithHostPrintsHelpfulLoginMessageOnRefreshFailure(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshFailureTokenResponse) + cmd.SetArgs([]string{"auth", "token", "--host", "https://accounts.cloud.databricks.com", "--account-id", "expired"}) + err := cmd.Execute() + + out := output.String() + assert.Empty(t, out) + assert.ErrorContains(t, err, "a new access token could not be retrieved because the refresh token is invalid. To reauthenticate, run ") + assert.ErrorContains(t, err, "auth login --host https://accounts.cloud.databricks.com --account-id expired") +} + +func TestTokenCmdInvalidResponse(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshFailureInvalidResponse) + cmd.SetArgs([]string{"auth", "token", "--profile", "active"}) + err := cmd.Execute() + + out := output.String() + assert.Empty(t, out) + assert.ErrorContains(t, err, "unexpected parsing token response: invalid character 'N' looking for beginning of value. Try logging in again with ") + assert.ErrorContains(t, err, "auth login --profile active` before retrying. If this fails, please report this issue to the Databricks CLI maintainers at https://github.com/databricks/cli/issues/new") +} + +func TestTokenCmdOtherErrorResponse(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshFailureOtherError) + cmd.SetArgs([]string{"auth", "token", "--profile", "active"}) + err := cmd.Execute() + + out := output.String() + assert.Empty(t, out) + assert.ErrorContains(t, err, "unexpected error refreshing token: Databricks is down. Try logging in again with ") + assert.ErrorContains(t, err, "auth login --profile active` before retrying. If this fails, please report this issue to the Databricks CLI maintainers at https://github.com/databricks/cli/issues/new") +} + +func TestTokenCmdWithProfileSuccess(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshSuccessTokenResponse) + cmd.SetArgs([]string{"auth", "token", "--profile", "active"}) + err := cmd.Execute() + + out := output.String() + validateToken(t, out) + assert.NoError(t, err) +} + +func TestTokenCmdWithHostSuccess(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshSuccessTokenResponse) + cmd.SetArgs([]string{"auth", "token", "--host", "https://accounts.cloud.databricks.com", "--account-id", "expired"}) + err := cmd.Execute() + + out := output.String() + validateToken(t, out) + assert.NoError(t, err) +} diff --git a/cmd/labs/project/installer.go b/cmd/labs/project/installer.go index 42c4a8496..92dfe9e7c 100644 --- a/cmd/labs/project/installer.go +++ b/cmd/labs/project/installer.go @@ -11,8 +11,8 @@ import ( "github.com/databricks/cli/cmd/labs/github" "github.com/databricks/cli/cmd/labs/unpack" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/cli/libs/databrickscfg/cfgpickers" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/process" "github.com/databricks/cli/libs/python" @@ -89,7 +89,7 @@ func (i *installer) Install(ctx context.Context) error { return err } w, err := i.login(ctx) - if err != nil && errors.Is(err, databrickscfg.ErrNoConfiguration) { + if err != nil && errors.Is(err, profile.ErrNoConfiguration) { cfg, err := i.Installer.envAwareConfig(ctx) if err != nil { return err diff --git a/cmd/root/auth.go b/cmd/root/auth.go index 387b67f0d..107679105 100644 --- a/cmd/root/auth.go +++ 
b/cmd/root/auth.go @@ -7,7 +7,7 @@ import ( "net/http" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "github.com/manifoldco/promptui" @@ -37,7 +37,7 @@ func (e ErrNoAccountProfiles) Error() string { func initProfileFlag(cmd *cobra.Command) { cmd.PersistentFlags().StringP("profile", "p", "", "~/.databrickscfg profile") - cmd.RegisterFlagCompletionFunc("profile", databrickscfg.ProfileCompletion) + cmd.RegisterFlagCompletionFunc("profile", profile.ProfileCompletion) } func profileFlagValue(cmd *cobra.Command) (string, bool) { @@ -111,27 +111,29 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { cfg := &config.Config{} // The command-line profile flag takes precedence over DATABRICKS_CONFIG_PROFILE. - profile, hasProfileFlag := profileFlagValue(cmd) + pr, hasProfileFlag := profileFlagValue(cmd) if hasProfileFlag { - cfg.Profile = profile + cfg.Profile = pr } ctx := cmd.Context() ctx = context.WithValue(ctx, &configUsed, cfg) cmd.SetContext(ctx) + profiler := profile.GetProfiler(ctx) + if cfg.Profile == "" { // account-level CLI was not really done before, so here are the assumptions: // 1. only admins will have account configured // 2. 99% of admins will have access to just one account // hence, we don't need to create a special "DEFAULT_ACCOUNT" profile yet - _, profiles, err := databrickscfg.LoadProfiles(cmd.Context(), databrickscfg.MatchAccountProfiles) + profiles, err := profiler.LoadProfiles(cmd.Context(), profile.MatchAccountProfiles) if err == nil && len(profiles) == 1 { cfg.Profile = profiles[0].Name } // if there is no config file, we don't want to fail and instead just skip it - if err != nil && !errors.Is(err, databrickscfg.ErrNoConfiguration) { + if err != nil && !errors.Is(err, profile.ErrNoConfiguration) { return err } } @@ -233,11 +235,12 @@ func SetAccountClient(ctx context.Context, a *databricks.AccountClient) context. 
} func AskForWorkspaceProfile(ctx context.Context) (string, error) { - path, err := databrickscfg.GetPath(ctx) + profiler := profile.GetProfiler(ctx) + path, err := profiler.GetPath(ctx) if err != nil { return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) } - file, profiles, err := databrickscfg.LoadProfiles(ctx, databrickscfg.MatchWorkspaceProfiles) + profiles, err := profiler.LoadProfiles(ctx, profile.MatchWorkspaceProfiles) if err != nil { return "", err } @@ -248,7 +251,7 @@ func AskForWorkspaceProfile(ctx context.Context) (string, error) { return profiles[0].Name, nil } i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ - Label: fmt.Sprintf("Workspace profiles defined in %s", file), + Label: fmt.Sprintf("Workspace profiles defined in %s", path), Items: profiles, Searcher: profiles.SearchCaseInsensitive, StartInSearchMode: true, @@ -266,11 +269,12 @@ func AskForWorkspaceProfile(ctx context.Context) (string, error) { } func AskForAccountProfile(ctx context.Context) (string, error) { - path, err := databrickscfg.GetPath(ctx) + profiler := profile.GetProfiler(ctx) + path, err := profiler.GetPath(ctx) if err != nil { return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) } - file, profiles, err := databrickscfg.LoadProfiles(ctx, databrickscfg.MatchAccountProfiles) + profiles, err := profiler.LoadProfiles(ctx, profile.MatchAccountProfiles) if err != nil { return "", err } @@ -281,7 +285,7 @@ func AskForAccountProfile(ctx context.Context) (string, error) { return profiles[0].Name, nil } i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ - Label: fmt.Sprintf("Account profiles defined in %s", file), + Label: fmt.Sprintf("Account profiles defined in %s", path), Items: profiles, Searcher: profiles.SearchCaseInsensitive, StartInSearchMode: true, diff --git a/libs/auth/cache/cache.go b/libs/auth/cache/cache.go index 5511c1922..097353e74 100644 --- a/libs/auth/cache/cache.go +++ b/libs/auth/cache/cache.go @@ -1,106 +1,26 @@ package cache import ( - "encoding/json" - "errors" - "fmt" - "io/fs" - "os" - "path/filepath" + "context" "golang.org/x/oauth2" ) -const ( - // where the token cache is stored - tokenCacheFile = ".databricks/token-cache.json" - - // only the owner of the file has full execute, read, and write access - ownerExecReadWrite = 0o700 - - // only the owner of the file has full read and write access - ownerReadWrite = 0o600 - - // format versioning leaves some room for format improvement - tokenCacheVersion = 1 -) - -var ErrNotConfigured = errors.New("databricks OAuth is not configured for this host") - -// this implementation requires the calling code to do a machine-wide lock, -// otherwise the file might get corrupt. 
-type TokenCache struct { - Version int `json:"version"` - Tokens map[string]*oauth2.Token `json:"tokens"` - - fileLocation string +type TokenCache interface { + Store(key string, t *oauth2.Token) error + Lookup(key string) (*oauth2.Token, error) } -func (c *TokenCache) Store(key string, t *oauth2.Token) error { - err := c.load() - if errors.Is(err, fs.ErrNotExist) { - dir := filepath.Dir(c.fileLocation) - err = os.MkdirAll(dir, ownerExecReadWrite) - if err != nil { - return fmt.Errorf("mkdir: %w", err) - } - } else if err != nil { - return fmt.Errorf("load: %w", err) - } - c.Version = tokenCacheVersion - if c.Tokens == nil { - c.Tokens = map[string]*oauth2.Token{} - } - c.Tokens[key] = t - raw, err := json.MarshalIndent(c, "", " ") - if err != nil { - return fmt.Errorf("marshal: %w", err) - } - return os.WriteFile(c.fileLocation, raw, ownerReadWrite) +var tokenCache int + +func WithTokenCache(ctx context.Context, c TokenCache) context.Context { + return context.WithValue(ctx, &tokenCache, c) } -func (c *TokenCache) Lookup(key string) (*oauth2.Token, error) { - err := c.load() - if errors.Is(err, fs.ErrNotExist) { - return nil, ErrNotConfigured - } else if err != nil { - return nil, fmt.Errorf("load: %w", err) - } - t, ok := c.Tokens[key] +func GetTokenCache(ctx context.Context) TokenCache { + c, ok := ctx.Value(&tokenCache).(TokenCache) if !ok { - return nil, ErrNotConfigured + return &FileTokenCache{} } - return t, nil -} - -func (c *TokenCache) location() (string, error) { - home, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("home: %w", err) - } - return filepath.Join(home, tokenCacheFile), nil -} - -func (c *TokenCache) load() error { - loc, err := c.location() - if err != nil { - return err - } - c.fileLocation = loc - raw, err := os.ReadFile(loc) - if err != nil { - return fmt.Errorf("read: %w", err) - } - err = json.Unmarshal(raw, c) - if err != nil { - return fmt.Errorf("parse: %w", err) - } - if c.Version != tokenCacheVersion { - // in the later iterations we could do state upgraders, - // so that we transform token cache from v1 to v2 without - // losing the tokens and asking the user to re-authenticate. - return fmt.Errorf("needs version %d, got version %d", - tokenCacheVersion, c.Version) - } - return nil + return c } diff --git a/libs/auth/cache/file.go b/libs/auth/cache/file.go new file mode 100644 index 000000000..38dfea9f2 --- /dev/null +++ b/libs/auth/cache/file.go @@ -0,0 +1,108 @@ +package cache + +import ( + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + + "golang.org/x/oauth2" +) + +const ( + // where the token cache is stored + tokenCacheFile = ".databricks/token-cache.json" + + // only the owner of the file has full execute, read, and write access + ownerExecReadWrite = 0o700 + + // only the owner of the file has full read and write access + ownerReadWrite = 0o600 + + // format versioning leaves some room for format improvement + tokenCacheVersion = 1 +) + +var ErrNotConfigured = errors.New("databricks OAuth is not configured for this host") + +// this implementation requires the calling code to do a machine-wide lock, +// otherwise the file might get corrupt. 
+type FileTokenCache struct { + Version int `json:"version"` + Tokens map[string]*oauth2.Token `json:"tokens"` + + fileLocation string +} + +func (c *FileTokenCache) Store(key string, t *oauth2.Token) error { + err := c.load() + if errors.Is(err, fs.ErrNotExist) { + dir := filepath.Dir(c.fileLocation) + err = os.MkdirAll(dir, ownerExecReadWrite) + if err != nil { + return fmt.Errorf("mkdir: %w", err) + } + } else if err != nil { + return fmt.Errorf("load: %w", err) + } + c.Version = tokenCacheVersion + if c.Tokens == nil { + c.Tokens = map[string]*oauth2.Token{} + } + c.Tokens[key] = t + raw, err := json.MarshalIndent(c, "", " ") + if err != nil { + return fmt.Errorf("marshal: %w", err) + } + return os.WriteFile(c.fileLocation, raw, ownerReadWrite) +} + +func (c *FileTokenCache) Lookup(key string) (*oauth2.Token, error) { + err := c.load() + if errors.Is(err, fs.ErrNotExist) { + return nil, ErrNotConfigured + } else if err != nil { + return nil, fmt.Errorf("load: %w", err) + } + t, ok := c.Tokens[key] + if !ok { + return nil, ErrNotConfigured + } + return t, nil +} + +func (c *FileTokenCache) location() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("home: %w", err) + } + return filepath.Join(home, tokenCacheFile), nil +} + +func (c *FileTokenCache) load() error { + loc, err := c.location() + if err != nil { + return err + } + c.fileLocation = loc + raw, err := os.ReadFile(loc) + if err != nil { + return fmt.Errorf("read: %w", err) + } + err = json.Unmarshal(raw, c) + if err != nil { + return fmt.Errorf("parse: %w", err) + } + if c.Version != tokenCacheVersion { + // in the later iterations we could do state upgraders, + // so that we transform token cache from v1 to v2 without + // losing the tokens and asking the user to re-authenticate. 
+ return fmt.Errorf("needs version %d, got version %d", + tokenCacheVersion, c.Version) + } + return nil +} + +var _ TokenCache = (*FileTokenCache)(nil) diff --git a/libs/auth/cache/cache_test.go b/libs/auth/cache/file_test.go similarity index 93% rename from libs/auth/cache/cache_test.go rename to libs/auth/cache/file_test.go index 6529882c7..3e4aae36f 100644 --- a/libs/auth/cache/cache_test.go +++ b/libs/auth/cache/file_test.go @@ -27,7 +27,7 @@ func setup(t *testing.T) string { func TestStoreAndLookup(t *testing.T) { setup(t) - c := &TokenCache{} + c := &FileTokenCache{} err := c.Store("x", &oauth2.Token{ AccessToken: "abc", }) @@ -38,7 +38,7 @@ func TestStoreAndLookup(t *testing.T) { }) require.NoError(t, err) - l := &TokenCache{} + l := &FileTokenCache{} tok, err := l.Lookup("x") require.NoError(t, err) assert.Equal(t, "abc", tok.AccessToken) @@ -50,7 +50,7 @@ func TestStoreAndLookup(t *testing.T) { func TestNoCacheFileReturnsErrNotConfigured(t *testing.T) { setup(t) - l := &TokenCache{} + l := &FileTokenCache{} _, err := l.Lookup("x") assert.Equal(t, ErrNotConfigured, err) } @@ -63,7 +63,7 @@ func TestLoadCorruptFile(t *testing.T) { err = os.WriteFile(f, []byte("abc"), ownerExecReadWrite) require.NoError(t, err) - l := &TokenCache{} + l := &FileTokenCache{} _, err = l.Lookup("x") assert.EqualError(t, err, "load: parse: invalid character 'a' looking for beginning of value") } @@ -76,14 +76,14 @@ func TestLoadWrongVersion(t *testing.T) { err = os.WriteFile(f, []byte(`{"version": 823, "things": []}`), ownerExecReadWrite) require.NoError(t, err) - l := &TokenCache{} + l := &FileTokenCache{} _, err = l.Lookup("x") assert.EqualError(t, err, "load: needs version 1, got version 823") } func TestDevNull(t *testing.T) { t.Setenv(homeEnvVar, "/dev/null") - l := &TokenCache{} + l := &FileTokenCache{} _, err := l.Lookup("x") // macOS/Linux: load: read: open /dev/null/.databricks/token-cache.json: // windows: databricks OAuth is not configured for this host @@ -95,7 +95,7 @@ func TestStoreOnDev(t *testing.T) { t.SkipNow() } t.Setenv(homeEnvVar, "/dev") - c := &TokenCache{} + c := &FileTokenCache{} err := c.Store("x", &oauth2.Token{ AccessToken: "abc", }) diff --git a/libs/auth/cache/in_memory.go b/libs/auth/cache/in_memory.go new file mode 100644 index 000000000..469d45575 --- /dev/null +++ b/libs/auth/cache/in_memory.go @@ -0,0 +1,26 @@ +package cache + +import ( + "golang.org/x/oauth2" +) + +type InMemoryTokenCache struct { + Tokens map[string]*oauth2.Token +} + +// Lookup implements TokenCache. +func (i *InMemoryTokenCache) Lookup(key string) (*oauth2.Token, error) { + token, ok := i.Tokens[key] + if !ok { + return nil, ErrNotConfigured + } + return token, nil +} + +// Store implements TokenCache. 
+func (i *InMemoryTokenCache) Store(key string, t *oauth2.Token) error { + i.Tokens[key] = t + return nil +} + +var _ TokenCache = (*InMemoryTokenCache)(nil) diff --git a/libs/auth/cache/in_memory_test.go b/libs/auth/cache/in_memory_test.go new file mode 100644 index 000000000..d8394d3b2 --- /dev/null +++ b/libs/auth/cache/in_memory_test.go @@ -0,0 +1,44 @@ +package cache + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/oauth2" +) + +func TestInMemoryCacheHit(t *testing.T) { + token := &oauth2.Token{ + AccessToken: "abc", + } + c := &InMemoryTokenCache{ + Tokens: map[string]*oauth2.Token{ + "key": token, + }, + } + res, err := c.Lookup("key") + assert.Equal(t, res, token) + assert.NoError(t, err) +} + +func TestInMemoryCacheMiss(t *testing.T) { + c := &InMemoryTokenCache{ + Tokens: map[string]*oauth2.Token{}, + } + _, err := c.Lookup("key") + assert.ErrorIs(t, err, ErrNotConfigured) +} + +func TestInMemoryCacheStore(t *testing.T) { + token := &oauth2.Token{ + AccessToken: "abc", + } + c := &InMemoryTokenCache{ + Tokens: map[string]*oauth2.Token{}, + } + err := c.Store("key", token) + assert.NoError(t, err) + res, err := c.Lookup("key") + assert.Equal(t, res, token) + assert.NoError(t, err) +} diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index 4ce0d4def..1f3e032de 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -20,6 +20,20 @@ import ( "golang.org/x/oauth2/authhandler" ) +var apiClientForOauth int + +func WithApiClientForOAuth(ctx context.Context, c *httpclient.ApiClient) context.Context { + return context.WithValue(ctx, &apiClientForOauth, c) +} + +func GetApiClientForOAuth(ctx context.Context) *httpclient.ApiClient { + c, ok := ctx.Value(&apiClientForOauth).(*httpclient.ApiClient) + if !ok { + return httpclient.NewApiClient(httpclient.ClientConfig{}) + } + return c +} + const ( // these values are predefined by Databricks as a public client // and is specific to this application only. Using these values @@ -28,7 +42,7 @@ const ( appRedirectAddr = "localhost:8020" // maximum amount of time to acquire listener on appRedirectAddr - DefaultTimeout = 45 * time.Second + listenerTimeout = 45 * time.Second ) var ( // Databricks SDK API: `databricks OAuth is not` will be checked for presence @@ -42,14 +56,13 @@ type PersistentAuth struct { AccountID string http *httpclient.ApiClient - cache tokenCache + cache cache.TokenCache ln net.Listener browser func(string) error } -type tokenCache interface { - Store(key string, t *oauth2.Token) error - Lookup(key string) (*oauth2.Token, error) +func (a *PersistentAuth) SetApiClient(h *httpclient.ApiClient) { + a.http = h } func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) { @@ -136,12 +149,10 @@ func (a *PersistentAuth) init(ctx context.Context) error { return ErrFetchCredentials } if a.http == nil { - a.http = httpclient.NewApiClient(httpclient.ClientConfig{ - // noop - }) + a.http = GetApiClientForOAuth(ctx) } if a.cache == nil { - a.cache = &cache.TokenCache{} + a.cache = cache.GetTokenCache(ctx) } if a.browser == nil { a.browser = browser.OpenURL @@ -149,7 +160,7 @@ func (a *PersistentAuth) init(ctx context.Context) error { // try acquire listener, which we also use as a machine-local // exclusive lock to prevent token cache corruption in the scope // of developer machine, where this command runs. 
- listener, err := retries.Poll(ctx, DefaultTimeout, + listener, err := retries.Poll(ctx, listenerTimeout, func() (*net.Listener, *retries.Err) { var lc net.ListenConfig l, err := lc.Listen(ctx, "tcp", appRedirectAddr) diff --git a/libs/databrickscfg/loader_test.go b/libs/databrickscfg/loader_test.go index 4525115e0..c42fcdbdd 100644 --- a/libs/databrickscfg/loader_test.go +++ b/libs/databrickscfg/loader_test.go @@ -68,7 +68,7 @@ func TestLoaderErrorsOnInvalidFile(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/badcfg", + ConfigFile: "profile/testdata/badcfg", Host: "https://default", } @@ -81,7 +81,7 @@ func TestLoaderSkipsNoMatchingHost(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/databrickscfg", + ConfigFile: "profile/testdata/databrickscfg", Host: "https://noneofthehostsmatch", } @@ -95,7 +95,7 @@ func TestLoaderMatchingHost(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/databrickscfg", + ConfigFile: "profile/testdata/databrickscfg", Host: "https://default", } @@ -110,7 +110,7 @@ func TestLoaderMatchingHostWithQuery(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/databrickscfg", + ConfigFile: "profile/testdata/databrickscfg", Host: "https://query/?foo=bar", } @@ -125,7 +125,7 @@ func TestLoaderErrorsOnMultipleMatches(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/databrickscfg", + ConfigFile: "profile/testdata/databrickscfg", Host: "https://foo/bar", } diff --git a/libs/databrickscfg/ops_test.go b/libs/databrickscfg/ops_test.go index 233555fe2..3ea92024c 100644 --- a/libs/databrickscfg/ops_test.go +++ b/libs/databrickscfg/ops_test.go @@ -30,7 +30,7 @@ func TestLoadOrCreate_NotAllowed(t *testing.T) { } func TestLoadOrCreate_Bad(t *testing.T) { - path := "testdata/badcfg" + path := "profile/testdata/badcfg" file, err := loadOrCreateConfigFile(path) assert.Error(t, err) assert.Nil(t, file) @@ -40,7 +40,7 @@ func TestMatchOrCreateSection_Direct(t *testing.T) { cfg := &config.Config{ Profile: "query", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -54,7 +54,7 @@ func TestMatchOrCreateSection_AccountID(t *testing.T) { cfg := &config.Config{ AccountID: "abc", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -68,7 +68,7 @@ func TestMatchOrCreateSection_NormalizeHost(t *testing.T) { cfg := &config.Config{ Host: "https://query/?o=abracadabra", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -80,7 +80,7 @@ func TestMatchOrCreateSection_NormalizeHost(t *testing.T) { func TestMatchOrCreateSection_NoProfileOrHost(t *testing.T) { cfg := &config.Config{} - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -92,7 +92,7 @@ func TestMatchOrCreateSection_MultipleProfiles(t *testing.T) { cfg := &config.Config{ Host: "https://foo", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := 
loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -105,7 +105,7 @@ func TestMatchOrCreateSection_NewProfile(t *testing.T) { Host: "https://bar", Profile: "delirium", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() diff --git a/libs/databrickscfg/profile/context.go b/libs/databrickscfg/profile/context.go new file mode 100644 index 000000000..fa4d2ad8a --- /dev/null +++ b/libs/databrickscfg/profile/context.go @@ -0,0 +1,17 @@ +package profile + +import "context" + +var profiler int + +func WithProfiler(ctx context.Context, p Profiler) context.Context { + return context.WithValue(ctx, &profiler, p) +} + +func GetProfiler(ctx context.Context) Profiler { + p, ok := ctx.Value(&profiler).(Profiler) + if !ok { + return DefaultProfiler + } + return p +} diff --git a/libs/databrickscfg/profile/file.go b/libs/databrickscfg/profile/file.go new file mode 100644 index 000000000..1b743014e --- /dev/null +++ b/libs/databrickscfg/profile/file.go @@ -0,0 +1,100 @@ +package profile + +import ( + "context" + "errors" + "fmt" + "io/fs" + "path/filepath" + "strings" + + "github.com/databricks/cli/libs/env" + "github.com/databricks/databricks-sdk-go/config" + "github.com/spf13/cobra" +) + +type FileProfilerImpl struct{} + +func (f FileProfilerImpl) getPath(ctx context.Context, replaceHomeDirWithTilde bool) (string, error) { + configFile := env.Get(ctx, "DATABRICKS_CONFIG_FILE") + if configFile == "" { + configFile = "~/.databrickscfg" + } + if !replaceHomeDirWithTilde { + return configFile, nil + } + homedir, err := env.UserHomeDir(ctx) + if err != nil { + return "", err + } + configFile = strings.Replace(configFile, homedir, "~", 1) + return configFile, nil +} + +// Get the path to the .databrickscfg file, falling back to the default in the current user's home directory. +func (f FileProfilerImpl) GetPath(ctx context.Context) (string, error) { + fp, err := f.getPath(ctx, true) + if err != nil { + return "", err + } + return filepath.Clean(fp), nil +} + +var ErrNoConfiguration = errors.New("no configuration file found") + +func (f FileProfilerImpl) Get(ctx context.Context) (*config.File, error) { + path, err := f.getPath(ctx, false) + if err != nil { + return nil, fmt.Errorf("cannot determine Databricks config file path: %w", err) + } + if strings.HasPrefix(path, "~") { + homedir, err := env.UserHomeDir(ctx) + if err != nil { + return nil, err + } + path = filepath.Join(homedir, path[1:]) + } + configFile, err := config.LoadFile(path) + if errors.Is(err, fs.ErrNotExist) { + // downstreams depend on ErrNoConfiguration. TODO: expose this error through SDK + return nil, fmt.Errorf("%w at %s; please create one by running 'databricks configure'", ErrNoConfiguration, path) + } else if err != nil { + return nil, err + } + return configFile, nil +} + +func (f FileProfilerImpl) LoadProfiles(ctx context.Context, fn ProfileMatchFunction) (profiles Profiles, err error) { + file, err := f.Get(ctx) + if err != nil { + return nil, fmt.Errorf("cannot load Databricks config file: %w", err) + } + + // Iterate over sections and collect matching profiles. 
+ for _, v := range file.Sections() { + all := v.KeysHash() + host, ok := all["host"] + if !ok { + // invalid profile + continue + } + profile := Profile{ + Name: v.Name(), + Host: host, + AccountID: all["account_id"], + } + if fn(profile) { + profiles = append(profiles, profile) + } + } + + return +} + +func ProfileCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + profiles, err := DefaultProfiler.LoadProfiles(cmd.Context(), MatchAllProfiles) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + return profiles.Names(), cobra.ShellCompDirectiveNoFileComp +} diff --git a/libs/databrickscfg/profiles_test.go b/libs/databrickscfg/profile/file_test.go similarity index 82% rename from libs/databrickscfg/profiles_test.go rename to libs/databrickscfg/profile/file_test.go index 33a5c9dfd..8e5cfefc0 100644 --- a/libs/databrickscfg/profiles_test.go +++ b/libs/databrickscfg/profile/file_test.go @@ -1,4 +1,4 @@ -package databrickscfg +package profile import ( "context" @@ -32,7 +32,8 @@ func TestLoadProfilesReturnsHomedirAsTilde(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "testdata") ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + profiler := FileProfilerImpl{} + file, err := profiler.GetPath(ctx) require.NoError(t, err) require.Equal(t, filepath.Clean("~/databrickscfg"), file) } @@ -41,7 +42,8 @@ func TestLoadProfilesReturnsHomedirAsTildeExoticFile(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "testdata") ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "~/databrickscfg") - file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + profiler := FileProfilerImpl{} + file, err := profiler.GetPath(ctx) require.NoError(t, err) require.Equal(t, filepath.Clean("~/databrickscfg"), file) } @@ -49,7 +51,8 @@ func TestLoadProfilesReturnsHomedirAsTildeExoticFile(t *testing.T) { func TestLoadProfilesReturnsHomedirAsTildeDefaultFile(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "testdata/sample-home") - file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + profiler := FileProfilerImpl{} + file, err := profiler.GetPath(ctx) require.NoError(t, err) require.Equal(t, filepath.Clean("~/.databrickscfg"), file) } @@ -57,14 +60,16 @@ func TestLoadProfilesReturnsHomedirAsTildeDefaultFile(t *testing.T) { func TestLoadProfilesNoConfiguration(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "testdata") - _, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + profiler := FileProfilerImpl{} + _, err := profiler.LoadProfiles(ctx, MatchAllProfiles) require.ErrorIs(t, err, ErrNoConfiguration) } func TestLoadProfilesMatchWorkspace(t *testing.T) { ctx := context.Background() ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - _, profiles, err := LoadProfiles(ctx, MatchWorkspaceProfiles) + profiler := FileProfilerImpl{} + profiles, err := profiler.LoadProfiles(ctx, MatchWorkspaceProfiles) require.NoError(t, err) assert.Equal(t, []string{"DEFAULT", "query", "foo1", "foo2"}, profiles.Names()) } @@ -72,7 +77,8 @@ func TestLoadProfilesMatchWorkspace(t *testing.T) { func TestLoadProfilesMatchAccount(t *testing.T) { ctx := context.Background() ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - _, profiles, err := LoadProfiles(ctx, MatchAccountProfiles) + profiler := 
FileProfilerImpl{} + profiles, err := profiler.LoadProfiles(ctx, MatchAccountProfiles) require.NoError(t, err) assert.Equal(t, []string{"acc"}, profiles.Names()) } diff --git a/libs/databrickscfg/profile/in_memory.go b/libs/databrickscfg/profile/in_memory.go new file mode 100644 index 000000000..902ae42e6 --- /dev/null +++ b/libs/databrickscfg/profile/in_memory.go @@ -0,0 +1,25 @@ +package profile + +import "context" + +type InMemoryProfiler struct { + Profiles Profiles +} + +// GetPath implements Profiler. +func (i InMemoryProfiler) GetPath(context.Context) (string, error) { + return "", nil +} + +// LoadProfiles implements Profiler. +func (i InMemoryProfiler) LoadProfiles(ctx context.Context, f ProfileMatchFunction) (Profiles, error) { + res := make(Profiles, 0) + for _, p := range i.Profiles { + if f(p) { + res = append(res, p) + } + } + return res, nil +} + +var _ Profiler = InMemoryProfiler{} diff --git a/libs/databrickscfg/profile/profile.go b/libs/databrickscfg/profile/profile.go new file mode 100644 index 000000000..510e5c9e5 --- /dev/null +++ b/libs/databrickscfg/profile/profile.go @@ -0,0 +1,49 @@ +package profile + +import ( + "strings" + + "github.com/databricks/databricks-sdk-go/config" +) + +// Profile holds a subset of the keys in a databrickscfg profile. +// It should only be used for prompting and filtering. +// Use its name to construct a config.Config. +type Profile struct { + Name string + Host string + AccountID string +} + +func (p Profile) Cloud() string { + cfg := config.Config{Host: p.Host} + switch { + case cfg.IsAws(): + return "AWS" + case cfg.IsAzure(): + return "Azure" + case cfg.IsGcp(): + return "GCP" + default: + return "" + } +} + +type Profiles []Profile + +// SearchCaseInsensitive implements the promptui.Searcher interface. +// This allows the user to immediately starting typing to narrow down the list. 
+func (p Profiles) SearchCaseInsensitive(input string, index int) bool { + input = strings.ToLower(input) + name := strings.ToLower(p[index].Name) + host := strings.ToLower(p[index].Host) + return strings.Contains(name, input) || strings.Contains(host, input) +} + +func (p Profiles) Names() []string { + names := make([]string, len(p)) + for i, v := range p { + names[i] = v.Name + } + return names +} diff --git a/libs/databrickscfg/profile/profiler.go b/libs/databrickscfg/profile/profiler.go new file mode 100644 index 000000000..c0a549256 --- /dev/null +++ b/libs/databrickscfg/profile/profiler.go @@ -0,0 +1,32 @@ +package profile + +import ( + "context" +) + +type ProfileMatchFunction func(Profile) bool + +func MatchWorkspaceProfiles(p Profile) bool { + return p.AccountID == "" +} + +func MatchAccountProfiles(p Profile) bool { + return p.Host != "" && p.AccountID != "" +} + +func MatchAllProfiles(p Profile) bool { + return true +} + +func WithName(name string) ProfileMatchFunction { + return func(p Profile) bool { + return p.Name == name + } +} + +type Profiler interface { + LoadProfiles(context.Context, ProfileMatchFunction) (Profiles, error) + GetPath(context.Context) (string, error) +} + +var DefaultProfiler = FileProfilerImpl{} diff --git a/libs/databrickscfg/testdata/badcfg b/libs/databrickscfg/profile/testdata/badcfg similarity index 100% rename from libs/databrickscfg/testdata/badcfg rename to libs/databrickscfg/profile/testdata/badcfg diff --git a/libs/databrickscfg/testdata/databrickscfg b/libs/databrickscfg/profile/testdata/databrickscfg similarity index 100% rename from libs/databrickscfg/testdata/databrickscfg rename to libs/databrickscfg/profile/testdata/databrickscfg diff --git a/libs/databrickscfg/testdata/sample-home/.databrickscfg b/libs/databrickscfg/profile/testdata/sample-home/.databrickscfg similarity index 100% rename from libs/databrickscfg/testdata/sample-home/.databrickscfg rename to libs/databrickscfg/profile/testdata/sample-home/.databrickscfg diff --git a/libs/databrickscfg/profiles.go b/libs/databrickscfg/profiles.go deleted file mode 100644 index 200ac9c87..000000000 --- a/libs/databrickscfg/profiles.go +++ /dev/null @@ -1,150 +0,0 @@ -package databrickscfg - -import ( - "context" - "errors" - "fmt" - "io/fs" - "path/filepath" - "strings" - - "github.com/databricks/cli/libs/env" - "github.com/databricks/databricks-sdk-go/config" - "github.com/spf13/cobra" -) - -// Profile holds a subset of the keys in a databrickscfg profile. -// It should only be used for prompting and filtering. -// Use its name to construct a config.Config. -type Profile struct { - Name string - Host string - AccountID string -} - -func (p Profile) Cloud() string { - cfg := config.Config{Host: p.Host} - switch { - case cfg.IsAws(): - return "AWS" - case cfg.IsAzure(): - return "Azure" - case cfg.IsGcp(): - return "GCP" - default: - return "" - } -} - -type Profiles []Profile - -func (p Profiles) Names() []string { - names := make([]string, len(p)) - for i, v := range p { - names[i] = v.Name - } - return names -} - -// SearchCaseInsensitive implements the promptui.Searcher interface. -// This allows the user to immediately starting typing to narrow down the list. 
-func (p Profiles) SearchCaseInsensitive(input string, index int) bool { - input = strings.ToLower(input) - name := strings.ToLower(p[index].Name) - host := strings.ToLower(p[index].Host) - return strings.Contains(name, input) || strings.Contains(host, input) -} - -type ProfileMatchFunction func(Profile) bool - -func MatchWorkspaceProfiles(p Profile) bool { - return p.AccountID == "" -} - -func MatchAccountProfiles(p Profile) bool { - return p.Host != "" && p.AccountID != "" -} - -func MatchAllProfiles(p Profile) bool { - return true -} - -// Get the path to the .databrickscfg file, falling back to the default in the current user's home directory. -func GetPath(ctx context.Context) (string, error) { - configFile := env.Get(ctx, "DATABRICKS_CONFIG_FILE") - if configFile == "" { - configFile = "~/.databrickscfg" - } - if strings.HasPrefix(configFile, "~") { - homedir, err := env.UserHomeDir(ctx) - if err != nil { - return "", err - } - configFile = filepath.Join(homedir, configFile[1:]) - } - return configFile, nil -} - -var ErrNoConfiguration = errors.New("no configuration file found") - -func Get(ctx context.Context) (*config.File, error) { - path, err := GetPath(ctx) - if err != nil { - return nil, fmt.Errorf("cannot determine Databricks config file path: %w", err) - } - configFile, err := config.LoadFile(path) - if errors.Is(err, fs.ErrNotExist) { - // downstreams depend on ErrNoConfiguration. TODO: expose this error through SDK - return nil, fmt.Errorf("%w at %s; please create one by running 'databricks configure'", ErrNoConfiguration, path) - } else if err != nil { - return nil, err - } - return configFile, nil -} - -func LoadProfiles(ctx context.Context, fn ProfileMatchFunction) (file string, profiles Profiles, err error) { - f, err := Get(ctx) - if err != nil { - return "", nil, fmt.Errorf("cannot load Databricks config file: %w", err) - } - - // Replace homedir with ~ if applicable. - // This is to make the output more readable. - file = filepath.Clean(f.Path()) - home, err := env.UserHomeDir(ctx) - if err != nil { - return "", nil, err - } - homedir := filepath.Clean(home) - if strings.HasPrefix(file, homedir) { - file = "~" + file[len(homedir):] - } - - // Iterate over sections and collect matching profiles. - for _, v := range f.Sections() { - all := v.KeysHash() - host, ok := all["host"] - if !ok { - // invalid profile - continue - } - profile := Profile{ - Name: v.Name(), - Host: host, - AccountID: all["account_id"], - } - if fn(profile) { - profiles = append(profiles, profile) - } - } - - return -} - -func ProfileCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - _, profiles, err := LoadProfiles(cmd.Context(), MatchAllProfiles) - if err != nil { - return nil, cobra.ShellCompDirectiveError - } - return profiles.Names(), cobra.ShellCompDirectiveNoFileComp -} From 4556d33e6b341f8efe3558b8961cf618299a3648 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 17 May 2024 11:02:30 +0200 Subject: [PATCH 188/286] Don't hide commands of services that are already hidden (#1438) ## Changes Currently, the help output of services in preview doesn't show any of their commands because the commands themselves are hidden as well. This change updates that behavior to not hide commands in preview if the service itself is also in preview. This makes the help output of services in preview actually usable. 
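To make the new rule concrete, here is a minimal sketch of the visibility decision. The `service` and `command` structs below are illustrative stand-ins, not the generator's actual types; the real change is the `{{- if and (not $hideService) .IsPrivatePreview }}` condition in `.codegen/service.go.tmpl` shown in the diff below.

```go
package main

import "fmt"

// Illustrative stand-ins for the generator's template context; the real
// generator reads .IsPrivatePreview inside .codegen/service.go.tmpl.
type service struct{ isPrivatePreview bool }

type command struct{ isPrivatePreview bool }

// hideCommand reports whether the generated command should set cmd.Hidden.
// A command is hidden only when it is in private preview and its parent
// service is not: a service in preview is already hidden as a whole, so
// hiding its commands again would leave the service's help output empty.
func hideCommand(svc service, c command) bool {
	return c.isPrivatePreview && !svc.isPrivatePreview
}

func main() {
	apps := service{isPrivatePreview: true}
	create := command{isPrivatePreview: true}
	fmt.Println(hideCommand(apps, create)) // false: the parent service is already hidden
}
```

In effect, the help output of a hidden (preview) service now lists its commands, while commands that are individually in preview under a generally available service remain hidden.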
## Tests n/a --- .codegen/service.go.tmpl | 6 ++-- cmd/workspace/apps/apps.go | 30 ------------------- .../consumer-fulfillments.go | 6 ---- .../consumer-installations.go | 15 ---------- .../consumer-listings/consumer-listings.go | 9 ------ .../consumer-personalization-requests.go | 9 ------ .../consumer-providers/consumer-providers.go | 6 ---- .../provider-exchange-filters.go | 12 -------- .../provider-exchanges/provider-exchanges.go | 27 ----------------- .../provider-files/provider-files.go | 12 -------- .../provider-listings/provider-listings.go | 15 ---------- .../provider-personalization-requests.go | 6 ---- .../provider-provider-analytics-dashboards.go | 12 -------- .../provider-providers/provider-providers.go | 15 ---------- 14 files changed, 4 insertions(+), 176 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 492b2132f..ad482ebe6 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -39,6 +39,7 @@ import ( {{define "service"}} {{- $excludeMethods := list "put-secret" -}} +{{- $hideService := .IsPrivatePreview }} // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. @@ -57,7 +58,7 @@ func New() *cobra.Command { "package": "{{ .Package.Name }}", }, {{- end }} - {{- if .IsPrivatePreview }} + {{- if $hideService }} // This service is being previewed; hide from help output. Hidden: true, @@ -190,7 +191,8 @@ func new{{.PascalName}}() *cobra.Command { {{- end -}} ` {{- end }} - {{- if .IsPrivatePreview }} + {{/* Don't hide commands if the service itself is already hidden. */}} + {{- if and (not $hideService) .IsPrivatePreview }} // This command is being previewed; hide from help output. cmd.Hidden = true diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 2ccd16c0c..1d6de4775 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -89,9 +89,6 @@ func newCreate() *cobra.Command { characters and hyphens and be between 2 and 30 characters long. It must be unique within the workspace.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -192,9 +189,6 @@ func newCreateDeployment() *cobra.Command { APP_NAME: The name of the app. SOURCE_CODE_PATH: The source code path of the deployment.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -288,9 +282,6 @@ func newDelete() *cobra.Command { Arguments: NAME: The name of the app.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -349,9 +340,6 @@ func newGet() *cobra.Command { Arguments: NAME: The name of the app.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -412,9 +400,6 @@ func newGetDeployment() *cobra.Command { APP_NAME: The name of the app. DEPLOYMENT_ID: The unique id of the deployment.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -474,9 +459,6 @@ func newGetEnvironment() *cobra.Command { Arguments: NAME: The name of the app.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -535,9 +517,6 @@ func newList() *cobra.Command { Lists all apps in the workspace.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -594,9 +573,6 @@ func newListDeployments() *cobra.Command { Arguments: APP_NAME: The name of the app.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -652,9 +628,6 @@ func newStop() *cobra.Command { Arguments: NAME: The name of the app.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -719,9 +692,6 @@ func newUpdate() *cobra.Command { characters and hyphens and be between 2 and 30 characters long. It must be unique within the workspace.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go b/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go index cd92002a4..6f3ba4b42 100755 --- a/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go +++ b/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go @@ -64,9 +64,6 @@ func newGet() *cobra.Command { Get a high level preview of the metadata of listing installable content.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -126,9 +123,6 @@ func newList() *cobra.Command { Personalized installations contain metadata about the attached share or git repo, as well as the Delta Sharing recipient type.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/consumer-installations/consumer-installations.go b/cmd/workspace/consumer-installations/consumer-installations.go index 9d6c7c894..d176e5b39 100755 --- a/cmd/workspace/consumer-installations/consumer-installations.go +++ b/cmd/workspace/consumer-installations/consumer-installations.go @@ -76,9 +76,6 @@ func newCreate() *cobra.Command { Install payload associated with a Databricks Marketplace listing.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -140,9 +137,6 @@ func newDelete() *cobra.Command { Uninstall an installation associated with a Databricks Marketplace listing.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -202,9 +196,6 @@ func newList() *cobra.Command { List all installations across all listings.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -258,9 +249,6 @@ func newListListingInstallations() *cobra.Command { List all installations for a particular listing.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -321,9 +309,6 @@ func newUpdate() *cobra.Command { the rotateToken flag is true 2. the token will be forcibly rotate if the rotateToken flag is true and the tokenInfo field is empty` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/consumer-listings/consumer-listings.go b/cmd/workspace/consumer-listings/consumer-listings.go index 70295dfb3..f75f03b3a 100755 --- a/cmd/workspace/consumer-listings/consumer-listings.go +++ b/cmd/workspace/consumer-listings/consumer-listings.go @@ -66,9 +66,6 @@ func newGet() *cobra.Command { Get a published listing in the Databricks Marketplace that the consumer has access to.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -148,9 +145,6 @@ func newList() *cobra.Command { List all published listings in the Databricks Marketplace that the consumer has access to.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -215,9 +209,6 @@ func newSearch() *cobra.Command { Arguments: QUERY: Fuzzy matches query` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient diff --git a/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go index 40ae4c848..c55ca4ee1 100755 --- a/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go +++ b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go @@ -75,9 +75,6 @@ func newCreate() *cobra.Command { Create a personalization request for a listing.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -142,9 +139,6 @@ func newGet() *cobra.Command { Get the personalization request for a listing. Each consumer can make at *most* one personalization request for a listing.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -203,9 +197,6 @@ func newList() *cobra.Command { List personalization requests for a consumer across all listings.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/consumer-providers/consumer-providers.go b/cmd/workspace/consumer-providers/consumer-providers.go index 5a0849dce..d8ac0ec12 100755 --- a/cmd/workspace/consumer-providers/consumer-providers.go +++ b/cmd/workspace/consumer-providers/consumer-providers.go @@ -64,9 +64,6 @@ func newGet() *cobra.Command { Get a provider in the Databricks Marketplace with at least one visible listing.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -139,9 +136,6 @@ func newList() *cobra.Command { List all providers in the Databricks Marketplace with at least one visible listing.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go index 43ae6da7e..4ab36b5d0 100755 --- a/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go +++ b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go @@ -68,9 +68,6 @@ func newCreate() *cobra.Command { Add an exchange filter.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -128,9 +125,6 @@ func newDelete() *cobra.Command { Delete an exchange filter` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -201,9 +195,6 @@ func newList() *cobra.Command { List exchange filter` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -258,9 +249,6 @@ func newUpdate() *cobra.Command { Update an exchange filter.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/provider-exchanges/provider-exchanges.go b/cmd/workspace/provider-exchanges/provider-exchanges.go index c9f5818f5..7ff73e0d1 100755 --- a/cmd/workspace/provider-exchanges/provider-exchanges.go +++ b/cmd/workspace/provider-exchanges/provider-exchanges.go @@ -74,9 +74,6 @@ func newAddListingToExchange() *cobra.Command { Associate an exchange with a listing` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -152,9 +149,6 @@ func newCreate() *cobra.Command { Create an exchange` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -212,9 +206,6 @@ func newDelete() *cobra.Command { This removes a listing from marketplace.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -270,9 +261,6 @@ func newDeleteListingFromExchange() *cobra.Command { Disassociate an exchange with a listing` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -328,9 +316,6 @@ func newGet() *cobra.Command { Get an exchange.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -389,9 +374,6 @@ func newList() *cobra.Command { List exchanges visible to provider` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -445,9 +427,6 @@ func newListExchangesForListing() *cobra.Command { List exchanges associated with a listing` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -503,9 +482,6 @@ func newListListingsForExchange() *cobra.Command { List listings associated with an exchange` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -560,9 +536,6 @@ func newUpdate() *cobra.Command { Update an exchange` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/provider-files/provider-files.go b/cmd/workspace/provider-files/provider-files.go index b9357f131..25e1addf5 100755 --- a/cmd/workspace/provider-files/provider-files.go +++ b/cmd/workspace/provider-files/provider-files.go @@ -72,9 +72,6 @@ func newCreate() *cobra.Command { Create a file. Currently, only provider icons and attached notebooks are supported.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -132,9 +129,6 @@ func newDelete() *cobra.Command { Delete a file` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -202,9 +196,6 @@ func newGet() *cobra.Command { Get a file` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -277,9 +268,6 @@ func newList() *cobra.Command { List files attached to a parent entity.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient diff --git a/cmd/workspace/provider-listings/provider-listings.go b/cmd/workspace/provider-listings/provider-listings.go index 4f90f7b9e..0abdf51d8 100755 --- a/cmd/workspace/provider-listings/provider-listings.go +++ b/cmd/workspace/provider-listings/provider-listings.go @@ -70,9 +70,6 @@ func newCreate() *cobra.Command { Create a new listing` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -130,9 +127,6 @@ func newDelete() *cobra.Command { Delete a listing` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -200,9 +194,6 @@ func newGet() *cobra.Command { Get a listing` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -273,9 +264,6 @@ func newList() *cobra.Command { List listings owned by this provider` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -328,9 +316,6 @@ func newUpdate() *cobra.Command { Update a listing` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go index 58b3cba1d..a38d9f420 100755 --- a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go +++ b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go @@ -69,9 +69,6 @@ func newList() *cobra.Command { List personalization requests to this provider. This will return all personalization requests, regardless of which listing they are for.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -128,9 +125,6 @@ func newUpdate() *cobra.Command { Update personalization request. This method only permits updating the status of the request.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go index 70ef0f320..8cee6e4eb 100755 --- a/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go +++ b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go @@ -60,9 +60,6 @@ func newCreate() *cobra.Command { Create provider analytics dashboard. Returns Marketplace specific id. Not to be confused with the Lakeview dashboard id.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -105,9 +102,6 @@ func newGet() *cobra.Command { Get provider analytics dashboard.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -150,9 +144,6 @@ func newGetLatestVersion() *cobra.Command { Get latest version of provider analytics dashboard.` - // This command is being previewed; hide from help output. 
- cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -207,9 +198,6 @@ func newUpdate() *cobra.Command { Arguments: ID: id is immutable property and can't be updated.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/workspace/provider-providers/provider-providers.go b/cmd/workspace/provider-providers/provider-providers.go index 52f4c45ae..b7273a344 100755 --- a/cmd/workspace/provider-providers/provider-providers.go +++ b/cmd/workspace/provider-providers/provider-providers.go @@ -69,9 +69,6 @@ func newCreate() *cobra.Command { Create a provider` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -129,9 +126,6 @@ func newDelete() *cobra.Command { Delete provider` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -199,9 +193,6 @@ func newGet() *cobra.Command { Get provider profile` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.PreRunE = root.MustWorkspaceClient @@ -272,9 +263,6 @@ func newList() *cobra.Command { List provider profiles for account.` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { @@ -327,9 +315,6 @@ func newUpdate() *cobra.Command { Update provider profile` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { From dd941078537581da78a8e6424d4c51953ddbca81 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 17 May 2024 11:26:09 +0200 Subject: [PATCH 189/286] Remove dependency on `ConfigFilePath` from path translation mutator (#1437) ## Changes This is one step toward removing the `path.Paths` struct embedding from resource types. Going forward, we'll exclusively use the `dyn.Value` tree for location information. ## Tests Existing unit tests that cover path resolution with fallback behavior pass. --- bundle/config/mutator/translate_paths.go | 28 +++++++++++++++++++ bundle/config/mutator/translate_paths_jobs.go | 19 ++++--------- .../mutator/translate_paths_pipelines.go | 15 ++-------- bundle/config/paths/paths.go | 10 ------- 4 files changed, 37 insertions(+), 35 deletions(-) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 018fd79c6..18a09dfd6 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -213,3 +213,31 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnos return diag.FromErr(err) } + +func gatherFallbackPaths(v dyn.Value, typ string) (map[string]string, error) { + var fallback = make(map[string]string) + var pattern = dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey()) + + // Previous behavior was to use a resource's location as the base path to resolve + // relative paths in its definition. With the introduction of [dyn.Value] throughout, + // we can use the location of the [dyn.Value] of the relative path itself. 
+ // + // This is more flexible, as resources may have overrides that are not + // located in the same directory as the resource configuration file. + // + // To maintain backwards compatibility, we allow relative paths to be resolved using + // the original approach as fallback if the [dyn.Value] location cannot be resolved. + _, err := dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[2].Key() + dir, err := v.Location().Directory() + if err != nil { + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for %s: %w", p, err) + } + fallback[key] = dir + return v, nil + }) + if err != nil { + return nil, err + } + return fallback, nil +} diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index d41660728..58b5e0fb0 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -55,21 +55,14 @@ func rewritePatterns(base dyn.Pattern) []jobRewritePattern { } func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { - var fallback = make(map[string]string) + fallback, err := gatherFallbackPaths(v, "jobs") + if err != nil { + return dyn.InvalidValue, err + } + + // Do not translate job task paths if using Git source var ignore []string - var err error - for key, job := range b.Config.Resources.Jobs { - dir, err := job.ConfigFileDirectory() - if err != nil { - return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err) - } - - // If we cannot resolve the relative path using the [dyn.Value] location itself, - // use the job's location as fallback. This is necessary for backwards compatibility. - fallback[key] = dir - - // Do not translate job task paths if using git source if job.GitSource != nil { ignore = append(ignore, key) } diff --git a/bundle/config/mutator/translate_paths_pipelines.go b/bundle/config/mutator/translate_paths_pipelines.go index caec4198e..5b2a2c346 100644 --- a/bundle/config/mutator/translate_paths_pipelines.go +++ b/bundle/config/mutator/translate_paths_pipelines.go @@ -8,18 +8,9 @@ import ( ) func (m *translatePaths) applyPipelineTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { - var fallback = make(map[string]string) - var err error - - for key, pipeline := range b.Config.Resources.Pipelines { - dir, err := pipeline.ConfigFileDirectory() - if err != nil { - return dyn.InvalidValue, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) - } - - // If we cannot resolve the relative path using the [dyn.Value] location itself, - // use the pipeline's location as fallback. This is necessary for backwards compatibility. - fallback[key] = dir + fallback, err := gatherFallbackPaths(v, "pipelines") + if err != nil { + return dyn.InvalidValue, err } // Base pattern to match all libraries in all pipelines. 
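For illustration only, a minimal sketch of how a translation step can consume the fallback map returned by `gatherFallbackPaths`. The `resolveRelative` helper and its error handling are assumptions made for this sketch and are not part of the patch; only `dyn.Value.Location().Directory()` and the fallback map come from the code above.

```go
package mutator

import (
	"fmt"
	"path/filepath"

	"github.com/databricks/cli/libs/dyn"
)

// resolveRelative is a sketch, not part of this patch: it resolves a relative
// path against the location recorded on the dyn.Value, and falls back to the
// directory the resource was originally defined in (the backwards-compatible
// behavior preserved via gatherFallbackPaths) when that location is unknown.
func resolveRelative(v dyn.Value, key, rel string, fallback map[string]string) (string, error) {
	dir, err := v.Location().Directory()
	if err != nil {
		fb, ok := fallback[key]
		if !ok {
			return "", fmt.Errorf("unable to determine directory for %s: %w", key, err)
		}
		dir = fb
	}
	return filepath.Join(dir, rel), nil
}
```

Either way, the location recorded on the `dyn.Value` tree takes precedence; the per-resource directory map only applies when that location cannot be resolved.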
diff --git a/bundle/config/paths/paths.go b/bundle/config/paths/paths.go index 68c32a48c..95977ee37 100644 --- a/bundle/config/paths/paths.go +++ b/bundle/config/paths/paths.go @@ -1,9 +1,6 @@ package paths import ( - "fmt" - "path/filepath" - "github.com/databricks/cli/libs/dyn" ) @@ -23,10 +20,3 @@ func (p *Paths) ConfigureConfigFilePath() { } p.ConfigFilePath = p.DynamicValue.Location().File } - -func (p *Paths) ConfigFileDirectory() (string, error) { - if p.ConfigFilePath == "" { - return "", fmt.Errorf("config file path not configured") - } - return filepath.Dir(p.ConfigFilePath), nil -} From 04e56aa4720e821bff4ceea1c34894e4f9c5dc89 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Fri, 17 May 2024 11:34:39 +0200 Subject: [PATCH 190/286] Add `merge.Override` transform (#1428) ## Changes Add `merge.Override` transform. It allows the override one `dyn.Value` with another, preserving source locations for parts of the sub-tree where nothing has changed. This is different from merging, where values are concatenated. `OverrideVisitor` is visiting the changes during the override process and allows to control of what changes are allowed or update the effective value. The primary use case is Python code updating bundle configuration. During override, we update locations only for changed values. This allows us to keep track of locations where values were initially defined and used for error reporting. For instance, merging: ```yaml resources: # location=left.yaml:0 jobs: # location=left.yaml:1 job_0: # location=left.yaml:2 name: "job_0" # location=left.yaml:3 ``` with ```yaml resources: # location=right.yaml:0 jobs: # location=right.yaml:1 job_0: # location=right.yaml:2 name: "job_0" # location=right.yaml:3 description: job 0 # location=right.yaml:4 job_1: # location=right.yaml:5 name: "job_1" # location=right.yaml:5 ``` produces ```yaml resources: # location=left.yaml:0 jobs: # location=left.yaml:1 job_0: # location=left.yaml:2 name: "job_0" # location=left.yaml:3 description: job 0 # location=right.yaml:4 job_1: # location=right.yaml:5 name: "job_1" # location=right.yaml:5 ``` ## Tests Unit tests --- libs/dyn/merge/override.go | 198 +++++++++++++++ libs/dyn/merge/override_test.go | 434 ++++++++++++++++++++++++++++++++ 2 files changed, 632 insertions(+) create mode 100644 libs/dyn/merge/override.go create mode 100644 libs/dyn/merge/override_test.go diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go new file mode 100644 index 000000000..97e8f1009 --- /dev/null +++ b/libs/dyn/merge/override.go @@ -0,0 +1,198 @@ +package merge + +import ( + "fmt" + + "github.com/databricks/cli/libs/dyn" +) + +// OverrideVisitor is visiting the changes during the override process +// and allows to control what changes are allowed, or update the effective +// value. +// +// For instance, it can disallow changes outside the specific path(s), or update +// the location of the effective value. +// +// 'VisitDelete' is called when a value is removed from mapping or sequence +// 'VisitInsert' is called when a new value is added to mapping or sequence +// 'VisitUpdate' is called when a leaf value is updated +type OverrideVisitor struct { + VisitDelete func(valuePath dyn.Path, left dyn.Value) error + VisitInsert func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) + VisitUpdate func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) +} + +// Override overrides value 'leftRoot' with 'rightRoot', keeping 'location' if values +// haven't changed. 
Preserving 'location' is important to preserve the original source of the value +// for error reporting. +func Override(leftRoot dyn.Value, rightRoot dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { + return override(dyn.EmptyPath, leftRoot, rightRoot, visitor) +} + +func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { + if left == dyn.NilValue && right == dyn.NilValue { + return dyn.NilValue, nil + } + + if left.Kind() != right.Kind() { + return visitor.VisitUpdate(basePath, left, right) + } + + // NB: we only call 'VisitUpdate' on leaf values, and for sequences and mappings + // we don't know if value was updated or not + + switch left.Kind() { + case dyn.KindMap: + merged, err := overrideMapping(basePath, left.MustMap(), right.MustMap(), visitor) + + if err != nil { + return dyn.InvalidValue, err + } + + return dyn.NewValue(merged, left.Location()), nil + + case dyn.KindSequence: + // some sequences are keyed, and we can detect which elements are added/removed/updated, + // but we don't have this information + merged, err := overrideSequence(basePath, left.MustSequence(), right.MustSequence(), visitor) + + if err != nil { + return dyn.InvalidValue, err + } + + return dyn.NewValue(merged, left.Location()), nil + + case dyn.KindString: + if left.MustString() == right.MustString() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + + case dyn.KindFloat: + // TODO consider comparison with epsilon if normalization doesn't help, where do we use floats? + + if left.MustFloat() == right.MustFloat() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + + case dyn.KindBool: + if left.MustBool() == right.MustBool() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + + case dyn.KindTime: + if left.MustTime() == right.MustTime() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + + case dyn.KindInt: + if left.MustInt() == right.MustInt() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + } + + return dyn.InvalidValue, fmt.Errorf("unexpected kind %s", left.Kind()) +} + +func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { + out := dyn.NewMapping() + + for _, leftPair := range leftMapping.Pairs() { + // detect if key was removed + if _, ok := rightMapping.GetPair(leftPair.Key); !ok { + path := basePath.Append(dyn.Key(leftPair.Key.MustString())) + + err := visitor.VisitDelete(path, leftPair.Value) + + if err != nil { + return dyn.NewMapping(), err + } + } + } + + // iterating only right mapping will remove keys not present anymore + // and insert new keys + + for _, rightPair := range rightMapping.Pairs() { + if leftPair, ok := leftMapping.GetPair(rightPair.Key); ok { + path := basePath.Append(dyn.Key(rightPair.Key.MustString())) + newValue, err := override(path, leftPair.Value, rightPair.Value, visitor) + + if err != nil { + return dyn.NewMapping(), err + } + + // key was there before, so keep its location + err = out.Set(leftPair.Key, newValue) + + if err != nil { + return dyn.NewMapping(), err + } + } else { + path := basePath.Append(dyn.Key(rightPair.Key.MustString())) + + newValue, err := visitor.VisitInsert(path, rightPair.Value) + + if err != nil { + return dyn.NewMapping(), err + } + + err = out.Set(rightPair.Key, newValue) + + if err != nil { + return 
dyn.NewMapping(), err + } + } + } + + return out, nil +} + +func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, visitor OverrideVisitor) ([]dyn.Value, error) { + minLen := min(len(left), len(right)) + var values []dyn.Value + + for i := 0; i < minLen; i++ { + path := basePath.Append(dyn.Index(i)) + merged, err := override(path, left[i], right[i], visitor) + + if err != nil { + return nil, err + } + + values = append(values, merged) + } + + if len(right) > len(left) { + for i := minLen; i < len(right); i++ { + path := basePath.Append(dyn.Index(i)) + newValue, err := visitor.VisitInsert(path, right[i]) + + if err != nil { + return nil, err + } + + values = append(values, newValue) + } + } else if len(left) > len(right) { + for i := minLen; i < len(left); i++ { + path := basePath.Append(dyn.Index(i)) + err := visitor.VisitDelete(path, left[i]) + + if err != nil { + return nil, err + } + } + } + + return values, nil +} diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go new file mode 100644 index 000000000..dbf249d12 --- /dev/null +++ b/libs/dyn/merge/override_test.go @@ -0,0 +1,434 @@ +package merge + +import ( + "testing" + "time" + + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +type overrideTestCase struct { + name string + left dyn.Value + right dyn.Value + state visitorState + expected dyn.Value +} + +func TestOverride_Primitive(t *testing.T) { + leftLocation := dyn.Location{File: "left.yml", Line: 1, Column: 1} + rightLocation := dyn.Location{File: "right.yml", Line: 1, Column: 1} + + modifiedTestCases := []overrideTestCase{ + { + name: "string (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue("a", leftLocation), + right: dyn.NewValue("b", rightLocation), + expected: dyn.NewValue("b", rightLocation), + }, + { + name: "string (not updated)", + state: visitorState{}, + left: dyn.NewValue("a", leftLocation), + right: dyn.NewValue("a", rightLocation), + expected: dyn.NewValue("a", leftLocation), + }, + { + name: "bool (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(true, leftLocation), + right: dyn.NewValue(false, rightLocation), + expected: dyn.NewValue(false, rightLocation), + }, + { + name: "bool (not updated)", + state: visitorState{}, + left: dyn.NewValue(true, leftLocation), + right: dyn.NewValue(true, rightLocation), + expected: dyn.NewValue(true, leftLocation), + }, + { + name: "int (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(1, leftLocation), + right: dyn.NewValue(2, rightLocation), + expected: dyn.NewValue(2, rightLocation), + }, + { + name: "int (not updated)", + state: visitorState{}, + left: dyn.NewValue(int32(1), leftLocation), + right: dyn.NewValue(int64(1), rightLocation), + expected: dyn.NewValue(int32(1), leftLocation), + }, + { + name: "float (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(1.0, leftLocation), + right: dyn.NewValue(2.0, rightLocation), + expected: dyn.NewValue(2.0, rightLocation), + }, + { + name: "float (not updated)", + state: visitorState{}, + left: dyn.NewValue(float32(1.0), leftLocation), + right: dyn.NewValue(float64(1.0), rightLocation), + expected: dyn.NewValue(float32(1.0), leftLocation), + }, + { + name: "time (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(time.UnixMilli(10000), leftLocation), + right: dyn.NewValue(time.UnixMilli(10001), rightLocation), + expected: 
dyn.NewValue(time.UnixMilli(10001), rightLocation), + }, + { + name: "time (not updated)", + state: visitorState{}, + left: dyn.NewValue(time.UnixMilli(10000), leftLocation), + right: dyn.NewValue(time.UnixMilli(10000), rightLocation), + expected: dyn.NewValue(time.UnixMilli(10000), leftLocation), + }, + { + name: "different types (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue("a", leftLocation), + right: dyn.NewValue(42, rightLocation), + expected: dyn.NewValue(42, rightLocation), + }, + { + name: "map - remove 'a', update 'b'", + state: visitorState{ + removed: []string{"root.a"}, + updated: []string{"root.b"}, + }, + left: dyn.NewValue( + map[string]dyn.Value{ + "a": dyn.NewValue(42, leftLocation), + "b": dyn.NewValue(10, leftLocation), + }, + leftLocation, + ), + right: dyn.NewValue( + map[string]dyn.Value{ + "b": dyn.NewValue(20, rightLocation), + }, + rightLocation, + ), + expected: dyn.NewValue( + map[string]dyn.Value{ + "b": dyn.NewValue(20, rightLocation), + }, + leftLocation, + ), + }, + { + name: "map - add 'a'", + state: visitorState{ + added: []string{"root.a"}, + }, + left: dyn.NewValue( + map[string]dyn.Value{ + "b": dyn.NewValue(10, leftLocation), + }, + leftLocation, + ), + right: dyn.NewValue( + map[string]dyn.Value{ + "a": dyn.NewValue(42, rightLocation), + "b": dyn.NewValue(10, rightLocation), + }, + leftLocation, + ), + expected: dyn.NewValue( + map[string]dyn.Value{ + "a": dyn.NewValue(42, rightLocation), + // location hasn't changed because value hasn't changed + "b": dyn.NewValue(10, leftLocation), + }, + leftLocation, + ), + }, + { + name: "map - remove 'a'", + state: visitorState{ + removed: []string{"root.a"}, + }, + left: dyn.NewValue( + map[string]dyn.Value{ + "a": dyn.NewValue(42, leftLocation), + "b": dyn.NewValue(10, leftLocation), + }, + leftLocation, + ), + right: dyn.NewValue( + map[string]dyn.Value{ + "b": dyn.NewValue(10, rightLocation), + }, + leftLocation, + ), + expected: dyn.NewValue( + map[string]dyn.Value{ + // location hasn't changed because value hasn't changed + "b": dyn.NewValue(10, leftLocation), + }, + leftLocation, + ), + }, + { + name: "map - add 'jobs.job_1'", + state: visitorState{ + added: []string{"root.jobs.job_1"}, + }, + left: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, leftLocation), + }, + leftLocation, + ), + }, + leftLocation, + ), + right: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, rightLocation), + "job_1": dyn.NewValue(1337, rightLocation), + }, + rightLocation, + ), + }, + rightLocation, + ), + expected: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, leftLocation), + "job_1": dyn.NewValue(1337, rightLocation), + }, + leftLocation, + ), + }, + leftLocation, + ), + }, + { + name: "map - remove nested key", + state: visitorState{removed: []string{"root.jobs.job_1"}}, + left: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, leftLocation), + "job_1": dyn.NewValue(1337, rightLocation), + }, + leftLocation, + ), + }, + leftLocation, + ), + right: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, rightLocation), + }, + rightLocation, + ), + }, + rightLocation, + ), + expected: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + 
"job_0": dyn.NewValue(42, leftLocation), + }, + leftLocation, + ), + }, + leftLocation, + ), + }, + { + name: "sequence - add", + state: visitorState{added: []string{"root[1]"}}, + left: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, leftLocation), + }, + leftLocation, + ), + right: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, rightLocation), + dyn.NewValue(10, rightLocation), + }, + rightLocation, + ), + expected: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, leftLocation), + dyn.NewValue(10, rightLocation), + }, + leftLocation, + ), + }, + { + name: "sequence - remove", + state: visitorState{removed: []string{"root[1]"}}, + left: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, leftLocation), + dyn.NewValue(10, leftLocation), + }, + leftLocation, + ), + right: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, rightLocation), + }, + rightLocation, + ), + expected: dyn.NewValue( + []dyn.Value{ + // location hasn't changed because value hasn't changed + dyn.NewValue(42, leftLocation), + }, + leftLocation, + ), + }, + { + name: "sequence (not updated)", + state: visitorState{}, + left: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, leftLocation), + }, + leftLocation, + ), + right: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, rightLocation), + }, + rightLocation, + ), + expected: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, leftLocation), + }, + leftLocation, + ), + }, + { + name: "nil (not updated)", + state: visitorState{}, + left: dyn.NilValue, + right: dyn.NilValue, + expected: dyn.NilValue, + }, + { + name: "nil (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NilValue, + right: dyn.NewValue(42, rightLocation), + expected: dyn.NewValue(42, rightLocation), + }, + { + name: "change kind (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(42.0, leftLocation), + right: dyn.NewValue(42, rightLocation), + expected: dyn.NewValue(42, rightLocation), + }, + } + + for _, tc := range modifiedTestCases { + t.Run(tc.name, func(t *testing.T) { + s, visitor := createVisitor() + out, err := override(dyn.NewPath(dyn.Key("root")), tc.left, tc.right, visitor) + + assert.NoError(t, err) + assert.Equal(t, tc.state, *s) + assert.Equal(t, tc.expected, out) + }) + } +} + +func TestOverride_PreserveMappingKeys(t *testing.T) { + leftLocation := dyn.Location{File: "left.yml", Line: 1, Column: 1} + leftKeyLocation := dyn.Location{File: "left.yml", Line: 2, Column: 1} + leftValueLocation := dyn.Location{File: "left.yml", Line: 3, Column: 1} + + rightLocation := dyn.Location{File: "right.yml", Line: 1, Column: 1} + rightKeyLocation := dyn.Location{File: "right.yml", Line: 2, Column: 1} + rightValueLocation := dyn.Location{File: "right.yml", Line: 3, Column: 1} + + left := dyn.NewMapping() + left.Set(dyn.NewValue("a", leftKeyLocation), dyn.NewValue(42, leftValueLocation)) + + right := dyn.NewMapping() + right.Set(dyn.NewValue("a", rightKeyLocation), dyn.NewValue(7, rightValueLocation)) + + state, visitor := createVisitor() + + out, err := override( + dyn.EmptyPath, + dyn.NewValue(left, leftLocation), + dyn.NewValue(right, rightLocation), + visitor, + ) + + assert.NoError(t, err) + + if err != nil { + outPairs := out.MustMap().Pairs() + + assert.Equal(t, visitorState{updated: []string{"a"}}, state) + assert.Equal(t, 1, len(outPairs)) + + // mapping was first defined in left, so it should keep its location + assert.Equal(t, leftLocation, out.Location()) + + // if there is a validation error for key value, it should point + // to where it was initially 
defined + assert.Equal(t, leftKeyLocation, outPairs[0].Key.Location()) + + // the value should have updated location, because it has changed + assert.Equal(t, rightValueLocation, outPairs[0].Value.Location()) + } +} + +type visitorState struct { + added []string + removed []string + updated []string +} + +func createVisitor() (*visitorState, OverrideVisitor) { + s := visitorState{} + + return &s, OverrideVisitor{ + VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + s.updated = append(s.updated, valuePath.String()) + + return right, nil + }, + VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + s.removed = append(s.removed, valuePath.String()) + + return nil + }, + VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + s.added = append(s.added, valuePath.String()) + + return right, nil + }, + } +} From a014d50a6af94c911ee46e57a4e2ffd3c13e8e53 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 17 May 2024 12:10:17 +0200 Subject: [PATCH 191/286] Fixed panic when loading incorrectly defined jobs (#1402) ## Changes If only key was defined for a job in YAML config, validate previously failed with segfault. This PR validates that jobs are correctly defined and returns an error if not. ## Tests Added regression test --- .../config/mutator/default_queueing_test.go | 12 +++++- bundle/config/resources.go | 42 +++++++++++++++++++ bundle/config/resources/job.go | 9 ++++ bundle/config/resources/mlflow_experiment.go | 28 +++++++++++++ bundle/config/resources/mlflow_model.go | 28 +++++++++++++ .../resources/model_serving_endpoint.go | 28 +++++++++++++ bundle/config/resources/pipeline.go | 9 ++++ bundle/config/resources/registered_model.go | 28 +++++++++++++ bundle/config/root.go | 8 ++++ bundle/permissions/filter_test.go | 7 ++++ bundle/permissions/mutator_test.go | 19 ++++++++- bundle/permissions/workspace_root_test.go | 4 +- .../my_first_job/resource.yml | 1 + .../my_second_job/resource.yml | 1 + bundle/tests/include_with_glob/job.yml | 1 + bundle/tests/undefined_job/databricks.yml | 8 ++++ bundle/tests/undefined_job_test.go | 12 ++++++ 17 files changed, 239 insertions(+), 6 deletions(-) create mode 100644 bundle/tests/undefined_job/databricks.yml create mode 100644 bundle/tests/undefined_job_test.go diff --git a/bundle/config/mutator/default_queueing_test.go b/bundle/config/mutator/default_queueing_test.go index ea60daf7f..d3621663b 100644 --- a/bundle/config/mutator/default_queueing_test.go +++ b/bundle/config/mutator/default_queueing_test.go @@ -56,7 +56,11 @@ func TestDefaultQueueingApplyEnableQueueing(t *testing.T) { Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "job": {}, + "job": { + JobSettings: &jobs.JobSettings{ + Name: "job", + }, + }, }, }, }, @@ -77,7 +81,11 @@ func TestDefaultQueueingApplyWithMultipleJobs(t *testing.T) { Queue: &jobs.QueueSettings{Enabled: false}, }, }, - "job2": {}, + "job2": { + JobSettings: &jobs.JobSettings{ + Name: "job", + }, + }, "job3": { JobSettings: &jobs.JobSettings{ Queue: &jobs.QueueSettings{Enabled: true}, diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 457360a0c..41ffc25cd 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -126,6 +126,47 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker, return tracker, nil } +type resource struct { + resource ConfigResource + resource_type string + key string +} + +func (r *Resources) allResources() []resource { + all := 
make([]resource, 0) + for k, e := range r.Jobs { + all = append(all, resource{resource_type: "job", resource: e, key: k}) + } + for k, e := range r.Pipelines { + all = append(all, resource{resource_type: "pipeline", resource: e, key: k}) + } + for k, e := range r.Models { + all = append(all, resource{resource_type: "model", resource: e, key: k}) + } + for k, e := range r.Experiments { + all = append(all, resource{resource_type: "experiment", resource: e, key: k}) + } + for k, e := range r.ModelServingEndpoints { + all = append(all, resource{resource_type: "serving endpoint", resource: e, key: k}) + } + for k, e := range r.RegisteredModels { + all = append(all, resource{resource_type: "registered model", resource: e, key: k}) + } + return all +} + +func (r *Resources) VerifyAllResourcesDefined() error { + all := r.allResources() + for _, e := range all { + err := e.resource.Validate() + if err != nil { + return fmt.Errorf("%s %s is not defined", e.resource_type, e.key) + } + } + + return nil +} + // ConfigureConfigFilePath sets the specified path for all resources contained in this instance. // This property is used to correctly resolve paths relative to the path // of the configuration file they were defined in. @@ -153,6 +194,7 @@ func (r *Resources) ConfigureConfigFilePath() { type ConfigResource interface { Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) TerraformResourceName() string + Validate() error } func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) { diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 45e9662d9..dde5d5663 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -2,6 +2,7 @@ package resources import ( "context" + "fmt" "strconv" "github.com/databricks/cli/bundle/config/paths" @@ -47,3 +48,11 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri func (j *Job) TerraformResourceName() string { return "databricks_job" } + +func (j *Job) Validate() error { + if j == nil || !j.DynamicValue.IsValid() || j.JobSettings == nil { + return fmt.Errorf("job is not defined") + } + + return nil +} diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index 0f53096a0..7854ee7e8 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -1,7 +1,12 @@ package resources import ( + "context" + "fmt" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -23,3 +28,26 @@ func (s *MlflowExperiment) UnmarshalJSON(b []byte) error { func (s MlflowExperiment) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.Experiments.GetExperiment(ctx, ml.GetExperimentRequest{ + ExperimentId: id, + }) + if err != nil { + log.Debugf(ctx, "experiment %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *MlflowExperiment) TerraformResourceName() string { + return "databricks_mlflow_experiment" +} + +func (s *MlflowExperiment) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("experiment is not defined") + } + + return nil +} diff --git 
a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 59893aa47..40da9f87d 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -1,7 +1,12 @@ package resources import ( + "context" + "fmt" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -23,3 +28,26 @@ func (s *MlflowModel) UnmarshalJSON(b []byte) error { func (s MlflowModel) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.ModelRegistry.GetModel(ctx, ml.GetModelRequest{ + Name: id, + }) + if err != nil { + log.Debugf(ctx, "model %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *MlflowModel) TerraformResourceName() string { + return "databricks_mlflow_model" +} + +func (s *MlflowModel) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("model is not defined") + } + + return nil +} diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index d1d57bafc..503cfbbb7 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -1,7 +1,12 @@ package resources import ( + "context" + "fmt" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/serving" ) @@ -33,3 +38,26 @@ func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error { func (s ModelServingEndpoint) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.ServingEndpoints.Get(ctx, serving.GetServingEndpointRequest{ + Name: id, + }) + if err != nil { + log.Debugf(ctx, "serving endpoint %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *ModelServingEndpoint) TerraformResourceName() string { + return "databricks_model_serving" +} + +func (s *ModelServingEndpoint) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("serving endpoint is not defined") + } + + return nil +} diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index 2f9ff8d0d..7e914b909 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -2,6 +2,7 @@ package resources import ( "context" + "fmt" "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" @@ -42,3 +43,11 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id func (p *Pipeline) TerraformResourceName() string { return "databricks_pipeline" } + +func (p *Pipeline) Validate() error { + if p == nil || !p.DynamicValue.IsValid() { + return fmt.Errorf("pipeline is not defined") + } + + return nil +} diff --git a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go index 7b4b70d1a..fba643c69 100644 --- a/bundle/config/resources/registered_model.go +++ b/bundle/config/resources/registered_model.go @@ -1,7 +1,12 @@ package resources import ( + 
"context" + "fmt" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/catalog" ) @@ -34,3 +39,26 @@ func (s *RegisteredModel) UnmarshalJSON(b []byte) error { func (s RegisteredModel) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.RegisteredModels.Get(ctx, catalog.GetRegisteredModelRequest{ + FullName: id, + }) + if err != nil { + log.Debugf(ctx, "registered model %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *RegisteredModel) TerraformResourceName() string { + return "databricks_registered_model" +} + +func (s *RegisteredModel) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("registered model is not defined") + } + + return nil +} diff --git a/bundle/config/root.go b/bundle/config/root.go index fda3759dd..88197c2b8 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -138,6 +138,14 @@ func (r *Root) updateWithDynamicValue(nv dyn.Value) error { // Assign the normalized configuration tree. r.value = nv + // At the moment the check has to be done as part of updateWithDynamicValue + // because otherwise ConfigureConfigFilePath will fail with a panic. + // In the future, we should move this check to a separate mutator in initialise phase. + err = r.Resources.VerifyAllResourcesDefined() + if err != nil { + return err + } + // Assign config file paths after converting to typed configuration. r.ConfigureConfigFilePath() return nil diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go index 410fa4be8..121ce10dc 100644 --- a/bundle/permissions/filter_test.go +++ b/bundle/permissions/filter_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" ) @@ -45,9 +46,15 @@ func testFixture(userName string) *bundle.Bundle { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": { + JobSettings: &jobs.JobSettings{ + Name: "job1", + }, Permissions: p, }, "job2": { + JobSettings: &jobs.JobSettings{ + Name: "job2", + }, Permissions: p, }, }, diff --git a/bundle/permissions/mutator_test.go b/bundle/permissions/mutator_test.go index 438a15061..1a177d902 100644 --- a/bundle/permissions/mutator_test.go +++ b/bundle/permissions/mutator_test.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) @@ -23,8 +24,16 @@ func TestApplyBundlePermissions(t *testing.T) { }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "job_1": {}, - "job_2": {}, + "job_1": { + JobSettings: &jobs.JobSettings{ + Name: "job_1", + }, + }, + "job_2": { + JobSettings: &jobs.JobSettings{ + Name: "job_2", + }, + }, }, Pipelines: map[string]*resources.Pipeline{ "pipeline_1": {}, @@ -109,11 +118,17 @@ func TestWarningOnOverlapPermission(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job_1": { + JobSettings: &jobs.JobSettings{ + Name: "job_1", + }, 
Permissions: []resources.Permission{ {Level: CAN_VIEW, UserName: "TestUser"}, }, }, "job_2": { + JobSettings: &jobs.JobSettings{ + Name: "job_2", + }, Permissions: []resources.Permission{ {Level: CAN_VIEW, UserName: "TestUser2"}, }, diff --git a/bundle/permissions/workspace_root_test.go b/bundle/permissions/workspace_root_test.go index 7dd97b62d..5e23a1da8 100644 --- a/bundle/permissions/workspace_root_test.go +++ b/bundle/permissions/workspace_root_test.go @@ -30,8 +30,8 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) { }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "job_1": {JobSettings: &jobs.JobSettings{}}, - "job_2": {JobSettings: &jobs.JobSettings{}}, + "job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}}, + "job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}}, }, Pipelines: map[string]*resources.Pipeline{ "pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}}, diff --git a/bundle/tests/include_multiple/my_first_job/resource.yml b/bundle/tests/include_multiple/my_first_job/resource.yml index c2be5a160..4bd7c7164 100644 --- a/bundle/tests/include_multiple/my_first_job/resource.yml +++ b/bundle/tests/include_multiple/my_first_job/resource.yml @@ -2,3 +2,4 @@ resources: jobs: my_first_job: id: 1 + name: "My First Job" diff --git a/bundle/tests/include_multiple/my_second_job/resource.yml b/bundle/tests/include_multiple/my_second_job/resource.yml index 2c28c4622..3a1514055 100644 --- a/bundle/tests/include_multiple/my_second_job/resource.yml +++ b/bundle/tests/include_multiple/my_second_job/resource.yml @@ -2,3 +2,4 @@ resources: jobs: my_second_job: id: 2 + name: "My Second Job" diff --git a/bundle/tests/include_with_glob/job.yml b/bundle/tests/include_with_glob/job.yml index 3d609c529..a98577818 100644 --- a/bundle/tests/include_with_glob/job.yml +++ b/bundle/tests/include_with_glob/job.yml @@ -2,3 +2,4 @@ resources: jobs: my_job: id: 1 + name: "My Job" diff --git a/bundle/tests/undefined_job/databricks.yml b/bundle/tests/undefined_job/databricks.yml new file mode 100644 index 000000000..12c19f946 --- /dev/null +++ b/bundle/tests/undefined_job/databricks.yml @@ -0,0 +1,8 @@ +bundle: + name: undefined-job + +resources: + jobs: + undefined: + test: + name: "Test Job" diff --git a/bundle/tests/undefined_job_test.go b/bundle/tests/undefined_job_test.go new file mode 100644 index 000000000..ed502c471 --- /dev/null +++ b/bundle/tests/undefined_job_test.go @@ -0,0 +1,12 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUndefinedJobLoadsWithError(t *testing.T) { + _, diags := loadTargetWithDiags("./undefined_job", "default") + assert.ErrorContains(t, diags.Error(), "job undefined is not defined") +} From dc13e4b37e0bccce1d9d120a53065b1ebd42f521 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 08:29:24 +0200 Subject: [PATCH 192/286] Bump github.com/fatih/color from 1.16.0 to 1.17.0 (#1441) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/fatih/color](https://github.com/fatih/color) from 1.16.0 to 1.17.0.
Release notes

Sourced from github.com/fatih/color's releases.

v1.17.0

Full Changelog: https://github.com/fatih/color/compare/v1.16.0...v1.17.0

Commits
  • b6598b1 Merge pull request #228 from klauspost/fix-println-issue-218
  • 00b1811 Fix multi-parameter println spacing
  • 04994a8 Merge pull request #224 from fatih/dependabot/go_modules/golang.org/x/sys-0.18.0
  • 7526cad Merge branch 'main' into dependabot/go_modules/golang.org/x/sys-0.18.0
  • 8d058ca Merge pull request #222 from fatih/ci-updates
  • 2ac809f Bump golang.org/x/sys from 0.17.0 to 0.18.0
  • 51a7bbf ci: update Go and Staticcheck versions
  • 799c49c Merge pull request #217 from fatih/dependabot/github_actions/actions/setup-go-5
  • f8e0ec9 Merge branch 'main' into dependabot/github_actions/actions/setup-go-5
  • 298abd8 Merge pull request #221 from fatih/dependabot/go_modules/golang.org/x/sys-0.17.0
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/fatih/color&package-manager=go_modules&previous-version=1.16.0&new-version=1.17.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6c8e845a5..d42a5d0f2 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.0 // Apache 2.0 github.com/databricks/databricks-sdk-go v0.40.1 // Apache 2.0 - github.com/fatih/color v1.16.0 // MIT + github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 diff --git a/go.sum b/go.sum index 222ce1e4c..37d848d24 100644 --- a/go.sum +++ b/go.sum @@ -40,8 +40,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= From 7262138b4daf7b2cbb51d69f90e67b500682147c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 08:29:50 +0200 Subject: [PATCH 193/286] Bump github.com/hashicorp/terraform-json from 0.21.0 to 0.22.1 (#1440) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/terraform-json](https://github.com/hashicorp/terraform-json) from 0.21.0 to 0.22.1.
Release notes

Sourced from github.com/hashicorp/terraform-json's releases.

v0.22.1

Full Changelog: https://github.com/hashicorp/terraform-json/compare/v0.22.0...v0.22.1

v0.22.0

Full Changelog: https://github.com/hashicorp/terraform-json/compare/v0.21.0...v0.22.0

Commits
  • 7e28e2d tfjson: Update Complete to a pointer value for older Terraform versions (#131)
  • 5e08e15 Bump hashicorp/setup-copywrite (#130)
  • 4a9d1e7 github: Set up Dependabot to manage HashiCorp-owned Actions versions (#128)
  • 11f603e Result of tsccr-helper -log-level=info gha update -latest . (#127)
  • 6e83e7b Result of tsccr-helper -log-level=info gha update -latest . (#124)
  • 3b8a921 tfjson: Add DeferredChanges and Complete to Plan JSON (#123)
  • 8cba21a Bump github.com/zclconf/go-cty from 1.14.3 to 1.14.4 (#122)
  • d5065f2 Bump github.com/zclconf/go-cty from 1.14.2 to 1.14.3 (#121)
  • 1498774 Bump github.com/zclconf/go-cty from 1.14.1 to 1.14.2 (#120)
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/hashicorp/terraform-json&package-manager=go_modules&previous-version=0.21.0&new-version=0.22.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index d42a5d0f2..4a6ea2e03 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/hashicorp/go-version v1.6.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.4 // MPL 2.0 github.com/hashicorp/terraform-exec v0.20.0 // MPL 2.0 - github.com/hashicorp/terraform-json v0.21.0 // MPL 2.0 + github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0 github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT @@ -51,7 +51,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/zclconf/go-cty v1.14.1 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect diff --git a/go.sum b/go.sum index 37d848d24..051774b4b 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9 github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= -github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= -github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= +github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -154,8 +154,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= -github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= From 3ce833f82602ede05801078adfd44a51d317cdd8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 08:48:16 +0200 Subject: [PATCH 194/286] Bump 
github.com/hashicorp/terraform-exec from 0.20.0 to 0.21.0 (#1442) Bumps [github.com/hashicorp/terraform-exec](https://github.com/hashicorp/terraform-exec) from 0.20.0 to 0.21.0.
Release notes

Sourced from github.com/hashicorp/terraform-exec's releases.

v0.21.0

ENHANCEMENTS:

  • tfexec: Add -allow-deferral to (Terraform).Apply() and (Terraform).Plan() methods (#447)

Changelog

Sourced from github.com/hashicorp/terraform-exec's changelog.

0.21.0 (May 17, 2024)

ENHANCEMENTS:

  • tfexec: Add -allow-deferral to (Terraform).Apply() and (Terraform).Plan() methods (#447)

Commits
  • b6ae175 v0.21.0 [skip ci]
  • 67e92f4 build(deps): bump github.com/hashicorp/terraform-json from 0.22.0 to 0.22.1 (...
  • 64df8d2 build(deps): bump github.com/hashicorp/terraform-json from 0.21.0 to 0.22.0 (...
  • af05782 build(deps): Bump workflows to latest trusted versions (#450)
  • 1df7d52 build(deps): bump golang.org/x/net from 0.22.0 to 0.23.0 (#444)
  • 6ea7295 build(deps): bump hashicorp/setup-copywrite from 1.1.2 to 1.1.3 in the github...
  • a9c9728 tfexec: Add -allow-deferral experimental options to Plan and Apply comm...
  • c07c678 Reenable Dependabot for internal GitHub actions (#455)
  • 259b9e9 build(deps): bump github.com/hashicorp/hc-install from 0.6.3 to 0.6.4 (#443)
  • 46360f1 build(deps): bump github.com/zclconf/go-cty from 1.14.3 to 1.14.4 (#441)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/hashicorp/terraform-exec&package-manager=go_modules&previous-version=0.20.0&new-version=0.21.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4a6ea2e03..ddebe9727 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.6.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.4 // MPL 2.0 - github.com/hashicorp/terraform-exec v0.20.0 // MPL 2.0 + github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0 github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT diff --git a/go.sum b/go.sum index 051774b4b..1dccbb2f9 100644 --- a/go.sum +++ b/go.sum @@ -101,8 +101,8 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= -github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= -github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= +github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= +github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= From 09aa3cb9e93581d4cce0aef024ad48723c45a683 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Tue, 21 May 2024 08:48:42 +0200 Subject: [PATCH 195/286] Add more tests for `merge.Override` (#1439) ## Changes Add test coverage to ensure we respect return value and error ## Tests Unit tests --- libs/dyn/merge/override_test.go | 65 ++++++++++++++++++++++++++++++--- 1 file changed, 59 insertions(+), 6 deletions(-) diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index dbf249d12..a34f23424 100644 --- a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -1,6 +1,7 @@ package merge import ( + "fmt" "testing" "time" @@ -351,13 +352,48 @@ func TestOverride_Primitive(t *testing.T) { for _, tc := range modifiedTestCases { t.Run(tc.name, func(t *testing.T) { - s, visitor := createVisitor() + s, visitor := createVisitor(visitorOpts{}) out, err := override(dyn.NewPath(dyn.Key("root")), tc.left, tc.right, visitor) assert.NoError(t, err) assert.Equal(t, tc.state, *s) assert.Equal(t, tc.expected, out) }) + + modified := len(tc.state.removed)+len(tc.state.added)+len(tc.state.updated) > 0 + + // visitor is not used unless there is a change + + if modified { + t.Run(tc.name+" - visitor has error", func(t *testing.T) { + _, visitor := createVisitor(visitorOpts{error: fmt.Errorf("unexpected change in test")}) + _, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) + + assert.EqualError(t, err, "unexpected change in test") + }) + + t.Run(tc.name+" - visitor overrides value", func(t *testing.T) { + expected := dyn.NewValue("return value", dyn.Location{}) + s, visitor := createVisitor(visitorOpts{returnValue: &expected}) + out, err 
:= override(dyn.EmptyPath, tc.left, tc.right, visitor) + + assert.NoError(t, err) + + for _, added := range s.added { + actual, err := dyn.GetByPath(out, dyn.MustPathFromString(added)) + + assert.NoError(t, err) + assert.Equal(t, expected, actual) + } + + for _, updated := range s.updated { + actual, err := dyn.GetByPath(out, dyn.MustPathFromString(updated)) + + assert.NoError(t, err) + assert.Equal(t, expected, actual) + } + }) + } } } @@ -376,7 +412,7 @@ func TestOverride_PreserveMappingKeys(t *testing.T) { right := dyn.NewMapping() right.Set(dyn.NewValue("a", rightKeyLocation), dyn.NewValue(7, rightValueLocation)) - state, visitor := createVisitor() + state, visitor := createVisitor(visitorOpts{}) out, err := override( dyn.EmptyPath, @@ -411,24 +447,41 @@ type visitorState struct { updated []string } -func createVisitor() (*visitorState, OverrideVisitor) { +type visitorOpts struct { + error error + returnValue *dyn.Value +} + +func createVisitor(opts visitorOpts) (*visitorState, OverrideVisitor) { s := visitorState{} return &s, OverrideVisitor{ VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { s.updated = append(s.updated, valuePath.String()) - return right, nil + if opts.error != nil { + return dyn.NilValue, opts.error + } else if opts.returnValue != nil { + return *opts.returnValue, nil + } else { + return right, nil + } }, VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { s.removed = append(s.removed, valuePath.String()) - return nil + return opts.error }, VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { s.added = append(s.added, valuePath.String()) - return right, nil + if opts.error != nil { + return dyn.NilValue, opts.error + } else if opts.returnValue != nil { + return *opts.returnValue, nil + } else { + return right, nil + } }, } } From 3f8036f2dfbf2624be3c20edc43393487579c8a2 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 21 May 2024 12:00:04 +0200 Subject: [PATCH 196/286] Fixed seg fault when specifying environment key for tasks (#1443) ## Changes Fixed seg fault when specifying environment key for tasks --- bundle/artifacts/artifacts.go | 4 ++++ bundle/libraries/libraries.go | 4 ++++ bundle/libraries/match.go | 4 ++++ bundle/tests/enviroment_key_test.go | 11 +++++++++++ bundle/tests/environment_key_only/databricks.yml | 16 ++++++++++++++++ 5 files changed, 39 insertions(+) create mode 100644 bundle/tests/environment_key_only/databricks.yml diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index 101b598dd..470c329a1 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -150,6 +150,10 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u for i := range job.Environments { env := &job.Environments[i] + if env.Spec == nil { + continue + } + for j := range env.Spec.Dependencies { lib := env.Spec.Dependencies[j] if isArtifactMatchLibrary(f, lib, b) { diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index a79adedbf..84ead052b 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -30,6 +30,10 @@ func FindAllEnvironments(b *bundle.Bundle) map[string]([]jobs.JobEnvironment) { func isEnvsWithLocalLibraries(envs []jobs.JobEnvironment) bool { for _, e := range envs { + if e.Spec == nil { + continue + } + for _, l := range e.Spec.Dependencies { if IsEnvironmentDependencyLocal(l) { return true diff --git a/bundle/libraries/match.go b/bundle/libraries/match.go index 
096cdf4a5..4feb4225d 100644 --- a/bundle/libraries/match.go +++ b/bundle/libraries/match.go @@ -62,6 +62,10 @@ func validateTaskLibraries(libs []compute.Library, b *bundle.Bundle) error { func validateEnvironments(envs []jobs.JobEnvironment, b *bundle.Bundle) error { for _, env := range envs { + if env.Spec == nil { + continue + } + for _, dep := range env.Spec.Dependencies { matches, err := filepath.Glob(filepath.Join(b.RootPath, dep)) if err != nil { diff --git a/bundle/tests/enviroment_key_test.go b/bundle/tests/enviroment_key_test.go index 3e12ddb68..aed3964db 100644 --- a/bundle/tests/enviroment_key_test.go +++ b/bundle/tests/enviroment_key_test.go @@ -1,8 +1,11 @@ package config_tests import ( + "context" "testing" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" "github.com/stretchr/testify/require" ) @@ -10,3 +13,11 @@ func TestEnvironmentKeySupported(t *testing.T) { _, diags := loadTargetWithDiags("./python_wheel/environment_key", "default") require.Empty(t, diags) } + +func TestEnvironmentKeyProvidedAndNoPanic(t *testing.T) { + b, diags := loadTargetWithDiags("./environment_key_only", "default") + require.Empty(t, diags) + + diags = bundle.Apply(context.Background(), b, libraries.ValidateLocalLibrariesExist()) + require.Empty(t, diags) +} diff --git a/bundle/tests/environment_key_only/databricks.yml b/bundle/tests/environment_key_only/databricks.yml new file mode 100644 index 000000000..caa34f8e3 --- /dev/null +++ b/bundle/tests/environment_key_only/databricks.yml @@ -0,0 +1,16 @@ +bundle: + name: environment_key_only + +resources: + jobs: + test_job: + name: "My Wheel Job" + tasks: + - task_key: TestTask + existing_cluster_id: "0717-132531-5opeqon1" + python_wheel_task: + package_name: "my_test_code" + entry_point: "run" + environment_key: "test_env" + environments: + - environment_key: "test_env" From c5032644a0c218e5b4c96f49eeaeb7a7b03985e4 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 21 May 2024 17:23:00 +0530 Subject: [PATCH 197/286] Fix conversion of zero valued scalar pointers to a dynamic value (#1433) ## Changes This PR also fixes empty values variable overrides using the --var flag. Now, using `--var="my_variable="` will set the value of `my_variable` to the empty string instead of ignoring the flag altogether. ## Tests The change using a unit test. Manually verified the `--var` flag works now. 
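
A minimal usage sketch (the variable name is illustrative and assumes the bundle declares `my_variable` under its top-level `variables` section):

```sh
# Override my_variable with an explicitly empty value.
# Before this change the flag was silently ignored; now the variable
# resolves to the empty string when the bundle is deployed.
databricks bundle deploy --var="my_variable="
```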
--- libs/dyn/convert/end_to_end_test.go | 45 +++++++++++++++++++++++++++++ libs/dyn/convert/from_typed.go | 42 ++++++++++++++++++--------- 2 files changed, 73 insertions(+), 14 deletions(-) diff --git a/libs/dyn/convert/end_to_end_test.go b/libs/dyn/convert/end_to_end_test.go index 33902bea8..f0e428a69 100644 --- a/libs/dyn/convert/end_to_end_test.go +++ b/libs/dyn/convert/end_to_end_test.go @@ -67,4 +67,49 @@ func TestAdditional(t *testing.T) { SliceOfPointer: []*string{nil}, }) }) + + t.Run("pointer to a empty string", func(t *testing.T) { + s := "" + assertFromTypedToTypedEqual(t, &s) + }) + + t.Run("nil pointer", func(t *testing.T) { + var s *string + assertFromTypedToTypedEqual(t, s) + }) + + t.Run("pointer to struct with scalar values", func(t *testing.T) { + s := "" + type foo struct { + A string `json:"a"` + B int `json:"b"` + C bool `json:"c"` + D *string `json:"d"` + } + assertFromTypedToTypedEqual(t, &foo{ + A: "a", + B: 1, + C: true, + D: &s, + }) + assertFromTypedToTypedEqual(t, &foo{ + A: "", + B: 0, + C: false, + D: nil, + }) + }) + + t.Run("map with scalar values", func(t *testing.T) { + assertFromTypedToTypedEqual(t, map[string]string{ + "a": "a", + "b": "b", + "c": "", + }) + assertFromTypedToTypedEqual(t, map[string]int{ + "a": 1, + "b": 0, + "c": 2, + }) + }) } diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index c344d12df..ae491d8ab 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -12,16 +12,22 @@ import ( type fromTypedOptions int const ( - // Use the zero value instead of setting zero values to nil. This is useful - // for types where the zero values and nil are semantically different. That is - // strings, bools, ints, floats. + // If this flag is set, zero values for scalars (strings, bools, ints, floats) + // would resolve to corresponding zero values in the dynamic representation. + // Otherwise, zero values for scalars resolve to dyn.NilValue. // - // Note: this is not needed for structs because dyn.NilValue is converted back - // to a zero value when using the convert.ToTyped function. + // This flag exists to reconcile the default values for scalars in a Go struct + // being zero values with zero values in a dynamic representation. In a Go struct, + // zero values are the same as the values not being set at all. This is not the case + // in the dynamic representation. // - // Values in maps and slices should be set to zero values, and not nil in the - // dynamic representation. - includeZeroValues fromTypedOptions = 1 << iota + // If a scalar value in a typed Go struct is zero, in the dynamic representation + // we would set it to dyn.NilValue, i.e. equivalent to the value not being set at all. + // + // If a scalar value in a Go map, slice or pointer is set to zero, we will set it + // to the zero value in the dynamic representation, and not dyn.NilValue. This is + // equivalent to the value being intentionally set to zero. + includeZeroValuedScalars fromTypedOptions = 1 << iota ) // FromTyped converts changes made in the typed structure w.r.t. the configuration value @@ -41,6 +47,14 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, return dyn.NilValue, nil } srcv = srcv.Elem() + + // If a pointer to a scalar type points to a zero value, we should include + // that zero value in the dynamic representation. + // This is because by default a pointer is nil in Go, and it not being nil + // indicates its value was intentionally set to zero. 
+ if !slices.Contains(options, includeZeroValuedScalars) { + options = append(options, includeZeroValuedScalars) + } } switch srcv.Kind() { @@ -129,7 +143,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { } // Convert entry taking into account the reference value (may be equal to dyn.NilValue). - nv, err := fromTyped(v.Interface(), refv, includeZeroValues) + nv, err := fromTyped(v.Interface(), refv, includeZeroValuedScalars) if err != nil { return dyn.InvalidValue, err } @@ -160,7 +174,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { v := src.Index(i) // Convert entry taking into account the reference value (may be equal to dyn.NilValue). - nv, err := fromTyped(v.Interface(), ref.Index(i), includeZeroValues) + nv, err := fromTyped(v.Interface(), ref.Index(i), includeZeroValuedScalars) if err != nil { return dyn.InvalidValue, err } @@ -183,7 +197,7 @@ func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptio case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValues) { + if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { return dyn.NilValue, nil } return dyn.V(src.String()), nil @@ -203,7 +217,7 @@ func fromTypedBool(src reflect.Value, ref dyn.Value, options ...fromTypedOptions case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValues) { + if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { return dyn.NilValue, nil } return dyn.V(src.Bool()), nil @@ -228,7 +242,7 @@ func fromTypedInt(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValues) { + if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { return dyn.NilValue, nil } return dyn.V(src.Int()), nil @@ -253,7 +267,7 @@ func fromTypedFloat(src reflect.Value, ref dyn.Value, options ...fromTypedOption case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValues) { + if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { return dyn.NilValue, nil } return dyn.V(src.Float()), nil From 63ceede3350ad87929ecf0cb6df78fd6a3c1ae37 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 22 May 2024 09:41:32 +0200 Subject: [PATCH 198/286] Update Go SDK to v0.41.0 (#1445) ## Changes Release notes at https://github.com/databricks/databricks-sdk-go/releases/tag/v0.41.0. 
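
This bump also renames the `lakehouse-monitors` command group to `quality-monitors` (see the file rename in the diff below). A migration sketch, with the subcommand and table name assumed purely for illustration:

```sh
# Before the SDK bump (assumed invocation shape):
databricks lakehouse-monitors get main.sales.orders
# After the bump, the same operation lives under quality-monitors:
databricks quality-monitors get main.sales.orders
```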
## Tests n/a --- .codegen/_openapi_sha | 2 +- .gitattributes | 2 +- bundle/schema/docs/bundle_descriptions.json | 216 ++++++++++++++---- .../esm-enablement-account.go | 3 - .../automatic-cluster-update.go | 3 - cmd/workspace/clusters/clusters.go | 23 +- cmd/workspace/cmd.go | 4 +- .../compliance-security-profile.go | 3 - cmd/workspace/connections/connections.go | 22 +- .../consumer-listings/consumer-listings.go | 4 +- .../enhanced-security-monitoring.go | 3 - cmd/workspace/libraries/libraries.go | 7 +- cmd/workspace/pipelines/pipelines.go | 1 + .../quality-monitors.go} | 31 +-- .../serving-endpoints/serving-endpoints.go | 6 +- cmd/workspace/shares/shares.go | 5 + .../system-schemas/system-schemas.go | 12 +- .../vector-search-indexes.go | 71 ++++++ go.mod | 2 +- go.sum | 4 +- 20 files changed, 297 insertions(+), 127 deletions(-) rename cmd/workspace/{lakehouse-monitors/lakehouse-monitors.go => quality-monitors/quality-monitors.go} (95%) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index f07cf44e5..8c62ac620 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -9bb7950fa3390afb97abaa552934bc0a2e069de5 \ No newline at end of file +7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index fb42588a7..c11257e9e 100755 --- a/.gitattributes +++ b/.gitattributes @@ -62,7 +62,6 @@ cmd/workspace/instance-pools/instance-pools.go linguist-generated=true cmd/workspace/instance-profiles/instance-profiles.go linguist-generated=true cmd/workspace/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/workspace/jobs/jobs.go linguist-generated=true -cmd/workspace/lakehouse-monitors/lakehouse-monitors.go linguist-generated=true cmd/workspace/lakeview/lakeview.go linguist-generated=true cmd/workspace/libraries/libraries.go linguist-generated=true cmd/workspace/metastores/metastores.go linguist-generated=true @@ -81,6 +80,7 @@ cmd/workspace/provider-personalization-requests/provider-personalization-request cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true cmd/workspace/provider-providers/provider-providers.go linguist-generated=true cmd/workspace/providers/providers.go linguist-generated=true +cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true cmd/workspace/queries/queries.go linguist-generated=true cmd/workspace/query-history/query-history.go linguist-generated=true cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index ba6fe8ce2..b6d0235aa 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -348,7 +348,7 @@ "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { - "description": "" + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied." 
}, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", @@ -424,14 +424,6 @@ } } }, - "clone_from": { - "description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.", - "properties": { - "source_cluster_id": { - "description": "The cluster that is being cloned." - } - } - }, "cluster_log_conf": { "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "properties": { @@ -474,9 +466,6 @@ "cluster_name": { "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" }, - "cluster_source": { - "description": "" - }, "custom_tags": { "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "additionalproperties": { @@ -975,7 +964,7 @@ "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { - "description": "" + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied." }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", @@ -1051,14 +1040,6 @@ } } }, - "clone_from": { - "description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.", - "properties": { - "source_cluster_id": { - "description": "The cluster that is being cloned." - } - } - }, "cluster_log_conf": { "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "properties": { @@ -1101,9 +1082,6 @@ "cluster_name": { "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" }, - "cluster_source": { - "description": "" - }, "custom_tags": { "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. 
Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "additionalproperties": { @@ -1419,7 +1397,7 @@ } }, "python_named_params": { - "description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.", + "description": "", "additionalproperties": { "description": "" } @@ -1853,6 +1831,15 @@ "openai_config": { "description": "OpenAI Config. Only required if the provider is 'openai'.", "properties": { + "microsoft_entra_client_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n" + }, + "microsoft_entra_client_secret": { + "description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n" + }, + "microsoft_entra_tenant_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n" + }, "openai_api_base": { "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" }, @@ -2009,6 +1996,9 @@ } } }, + "route_optimized": { + "description": "Enable route optimization for the serving endpoint." + }, "tags": { "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", "items": { @@ -2469,6 +2459,23 @@ } } }, + "gateway_definition": { + "description": "The definition of a gateway pipeline to support CDC.", + "properties": { + "connection_id": { + "description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source." + }, + "gateway_storage_catalog": { + "description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location." + }, + "gateway_storage_name": { + "description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n" + }, + "gateway_storage_schema": { + "description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location." + } + } + }, "id": { "description": "Unique identifier for this pipeline." }, @@ -2500,6 +2507,23 @@ }, "source_schema": { "description": "Required. Schema name in the source database." + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } } } }, @@ -2523,11 +2547,45 @@ }, "source_table": { "description": "Required. Table name in the source database." 
+ }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } } } } } } + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } } } }, @@ -3071,7 +3129,7 @@ "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { - "description": "" + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied." }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", @@ -3147,14 +3205,6 @@ } } }, - "clone_from": { - "description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.", - "properties": { - "source_cluster_id": { - "description": "The cluster that is being cloned." - } - } - }, "cluster_log_conf": { "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "properties": { @@ -3197,9 +3247,6 @@ "cluster_name": { "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" }, - "cluster_source": { - "description": "" - }, "custom_tags": { "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "additionalproperties": { @@ -3698,7 +3745,7 @@ "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { - "description": "" + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. 
When set to false, only fixed values from the policy will be applied." }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", @@ -3774,14 +3821,6 @@ } } }, - "clone_from": { - "description": "When specified, this clones libraries from a source cluster during the creation of a new cluster.", - "properties": { - "source_cluster_id": { - "description": "The cluster that is being cloned." - } - } - }, "cluster_log_conf": { "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", "properties": { @@ -3824,9 +3863,6 @@ "cluster_name": { "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" }, - "cluster_source": { - "description": "" - }, "custom_tags": { "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "additionalproperties": { @@ -4142,7 +4178,7 @@ } }, "python_named_params": { - "description": "A map from keys to values for jobs with Python wheel task, for example `\"python_named_params\": {\"name\": \"task\", \"data\": \"dbfs:/path/to/data.json\"}`.", + "description": "", "additionalproperties": { "description": "" } @@ -4576,6 +4612,15 @@ "openai_config": { "description": "OpenAI Config. Only required if the provider is 'openai'.", "properties": { + "microsoft_entra_client_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n" + }, + "microsoft_entra_client_secret": { + "description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n" + }, + "microsoft_entra_tenant_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n" + }, "openai_api_base": { "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" }, @@ -4732,6 +4777,9 @@ } } }, + "route_optimized": { + "description": "Enable route optimization for the serving endpoint." + }, "tags": { "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", "items": { @@ -5192,6 +5240,23 @@ } } }, + "gateway_definition": { + "description": "The definition of a gateway pipeline to support CDC.", + "properties": { + "connection_id": { + "description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source." + }, + "gateway_storage_catalog": { + "description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location." + }, + "gateway_storage_name": { + "description": "Required. 
The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n" + }, + "gateway_storage_schema": { + "description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location." + } + } + }, "id": { "description": "Unique identifier for this pipeline." }, @@ -5223,6 +5288,23 @@ }, "source_schema": { "description": "Required. Schema name in the source database." + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } } } }, @@ -5246,11 +5328,45 @@ }, "source_table": { "description": "Required. Table name in the source database." + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } } } } } } + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } } } }, diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go index 49c21eb48..71149e5ad 100755 --- a/cmd/account/esm-enablement-account/esm-enablement-account.go +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -25,9 +25,6 @@ func New() *cobra.Command { setting is disabled for new workspaces. After workspace creation, account admins can enable enhanced security monitoring individually for each workspace.`, - - // This service is being previewed; hide from help output. 
- Hidden: true, } // Add methods diff --git a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go index 681dba7b3..2385195bb 100755 --- a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go +++ b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go @@ -22,9 +22,6 @@ func New() *cobra.Command { Short: `Controls whether automatic cluster update is enabled for the current workspace.`, Long: `Controls whether automatic cluster update is enabled for the current workspace. By default, it is turned off.`, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index e657fd9c3..f4baab3b2 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -188,7 +188,7 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, ``) + cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, `When set to true, fixed and default values from the policy will be used for fields that are omitted.`) // TODO: complex arg: autoscale cmd.Flags().IntVar(&createReq.AutoterminationMinutes, "autotermination-minutes", createReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`) // TODO: complex arg: aws_attributes @@ -196,15 +196,6 @@ func newCreate() *cobra.Command { // TODO: complex arg: clone_from // TODO: complex arg: cluster_log_conf cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) - cmd.Flags().Var(&createReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [ - API, - JOB, - MODELS, - PIPELINE, - PIPELINE_MAINTENANCE, - SQL, - UI, -]`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. 
Supported values: [ LEGACY_PASSTHROUGH, @@ -443,23 +434,13 @@ func newEdit() *cobra.Command { // TODO: short flags cmd.Flags().Var(&editJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, ``) + cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, `When set to true, fixed and default values from the policy will be used for fields that are omitted.`) // TODO: complex arg: autoscale cmd.Flags().IntVar(&editReq.AutoterminationMinutes, "autotermination-minutes", editReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`) // TODO: complex arg: aws_attributes // TODO: complex arg: azure_attributes - // TODO: complex arg: clone_from // TODO: complex arg: cluster_log_conf cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`) - cmd.Flags().Var(&editReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [ - API, - JOB, - MODELS, - PIPELINE, - PIPELINE_MAINTENANCE, - SQL, - UI, -]`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ LEGACY_PASSTHROUGH, diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index a78b9bc1e..7ad9389a8 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -32,7 +32,6 @@ import ( instance_profiles "github.com/databricks/cli/cmd/workspace/instance-profiles" ip_access_lists "github.com/databricks/cli/cmd/workspace/ip-access-lists" jobs "github.com/databricks/cli/cmd/workspace/jobs" - lakehouse_monitors "github.com/databricks/cli/cmd/workspace/lakehouse-monitors" lakeview "github.com/databricks/cli/cmd/workspace/lakeview" libraries "github.com/databricks/cli/cmd/workspace/libraries" metastores "github.com/databricks/cli/cmd/workspace/metastores" @@ -51,6 +50,7 @@ import ( provider_provider_analytics_dashboards "github.com/databricks/cli/cmd/workspace/provider-provider-analytics-dashboards" provider_providers "github.com/databricks/cli/cmd/workspace/provider-providers" providers "github.com/databricks/cli/cmd/workspace/providers" + quality_monitors "github.com/databricks/cli/cmd/workspace/quality-monitors" queries "github.com/databricks/cli/cmd/workspace/queries" query_history "github.com/databricks/cli/cmd/workspace/query-history" query_visualizations "github.com/databricks/cli/cmd/workspace/query-visualizations" @@ -113,7 +113,6 @@ func All() []*cobra.Command { out = append(out, instance_profiles.New()) out = append(out, ip_access_lists.New()) out = append(out, jobs.New()) - out = append(out, lakehouse_monitors.New()) out = append(out, lakeview.New()) out = append(out, libraries.New()) out = append(out, metastores.New()) @@ -132,6 +131,7 @@ func All() []*cobra.Command { out = append(out, provider_provider_analytics_dashboards.New()) out = append(out, provider_providers.New()) out = append(out, providers.New()) + out = append(out, quality_monitors.New()) out = append(out, queries.New()) out = append(out, query_history.New()) out = append(out, query_visualizations.New()) diff --git 
a/cmd/workspace/compliance-security-profile/compliance-security-profile.go b/cmd/workspace/compliance-security-profile/compliance-security-profile.go index efafb4627..a7b45901f 100755 --- a/cmd/workspace/compliance-security-profile/compliance-security-profile.go +++ b/cmd/workspace/compliance-security-profile/compliance-security-profile.go @@ -25,9 +25,6 @@ func New() *cobra.Command { off. This settings can NOT be disabled once it is enabled.`, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index bdb266685..f76420fbe 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -154,7 +154,7 @@ func newDelete() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down." - names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx) + names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx, catalog.ListConnectionsRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Connections drop-down. Please manually specify required arguments. Original error: %w", err) @@ -224,7 +224,7 @@ func newGet() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down." - names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx) + names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx, catalog.ListConnectionsRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Connections drop-down. Please manually specify required arguments. Original error: %w", err) @@ -265,11 +265,19 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *catalog.ListConnectionsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq catalog.ListConnectionsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of connections to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "list" cmd.Short = `List connections.` cmd.Long = `List connections. @@ -278,11 +286,17 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response := w.Connections.List(ctx) + + response := w.Connections.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -292,7 +306,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. 
for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd diff --git a/cmd/workspace/consumer-listings/consumer-listings.go b/cmd/workspace/consumer-listings/consumer-listings.go index f75f03b3a..8669dfae5 100755 --- a/cmd/workspace/consumer-listings/consumer-listings.go +++ b/cmd/workspace/consumer-listings/consumer-listings.go @@ -129,13 +129,14 @@ func newList() *cobra.Command { // TODO: array: assets // TODO: array: categories + cmd.Flags().BoolVar(&listReq.IsAscending, "is-ascending", listReq.IsAscending, ``) cmd.Flags().BoolVar(&listReq.IsFree, "is-free", listReq.IsFree, `Filters each listing based on if it is free.`) cmd.Flags().BoolVar(&listReq.IsPrivateExchange, "is-private-exchange", listReq.IsPrivateExchange, `Filters each listing based on if it is a private exchange.`) cmd.Flags().BoolVar(&listReq.IsStaffPick, "is-staff-pick", listReq.IsStaffPick, `Filters each listing based on whether it is a staff pick.`) cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) // TODO: array: provider_ids - // TODO: complex arg: sort_by_spec + cmd.Flags().Var(&listReq.SortBy, "sort-by", `Criteria for sorting the resulting set of listings. Supported values: [SORT_BY_DATE, SORT_BY_RELEVANCE, SORT_BY_TITLE, SORT_BY_UNSPECIFIED]`) // TODO: array: tags cmd.Use = "list" @@ -191,6 +192,7 @@ func newSearch() *cobra.Command { // TODO: array: assets // TODO: array: categories + cmd.Flags().BoolVar(&searchReq.IsAscending, "is-ascending", searchReq.IsAscending, ``) cmd.Flags().BoolVar(&searchReq.IsFree, "is-free", searchReq.IsFree, ``) cmd.Flags().BoolVar(&searchReq.IsPrivateExchange, "is-private-exchange", searchReq.IsPrivateExchange, ``) cmd.Flags().IntVar(&searchReq.PageSize, "page-size", searchReq.PageSize, ``) diff --git a/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go b/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go index 86b4244d5..a8acc5cd1 100755 --- a/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go +++ b/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go @@ -27,9 +27,6 @@ func New() *cobra.Command { If the compliance security profile is disabled, you can enable or disable this setting and it is not permanent.`, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index aed8843dc..2c10d8161 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -80,11 +80,8 @@ func newAllClusterStatuses() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Libraries.AllClusterStatuses(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Libraries.AllClusterStatuses(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 5a55fd72b..f1cc4e3f7 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -945,6 +945,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Edition, "edition", updateReq.Edition, `Pipeline product edition.`) cmd.Flags().Int64Var(&updateReq.ExpectedLastModified, "expected-last-modified", updateReq.ExpectedLastModified, `If present, the last-modified time of the pipeline settings before the edit.`) // TODO: complex arg: filters + // TODO: complex arg: gateway_definition cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Unique identifier for this pipeline.`) // TODO: complex arg: ingestion_definition // TODO: array: libraries diff --git a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go b/cmd/workspace/quality-monitors/quality-monitors.go similarity index 95% rename from cmd/workspace/lakehouse-monitors/lakehouse-monitors.go rename to cmd/workspace/quality-monitors/quality-monitors.go index 465ed6f92..95d992164 100755 --- a/cmd/workspace/lakehouse-monitors/lakehouse-monitors.go +++ b/cmd/workspace/quality-monitors/quality-monitors.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package lakehouse_monitors +package quality_monitors import ( "fmt" @@ -18,7 +18,7 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "lakehouse-monitors", + Use: "quality-monitors", Short: `A monitor computes and monitors data or model quality metrics for a table over time.`, Long: `A monitor computes and monitors data or model quality metrics for a table over time. It generates metrics tables and a dashboard that you can use to monitor @@ -105,7 +105,7 @@ func newCancelRefresh() *cobra.Command { cancelRefreshReq.TableName = args[0] cancelRefreshReq.RefreshId = args[1] - err = w.LakehouseMonitors.CancelRefresh(ctx, cancelRefreshReq) + err = w.QualityMonitors.CancelRefresh(ctx, cancelRefreshReq) if err != nil { return err } @@ -208,7 +208,7 @@ func newCreate() *cobra.Command { createReq.OutputSchemaName = args[2] } - response, err := w.LakehouseMonitors.Create(ctx, createReq) + response, err := w.QualityMonitors.Create(ctx, createReq) if err != nil { return err } @@ -233,13 +233,13 @@ func newCreate() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deleteOverrides []func( *cobra.Command, - *catalog.DeleteLakehouseMonitorRequest, + *catalog.DeleteQualityMonitorRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq catalog.DeleteLakehouseMonitorRequest + var deleteReq catalog.DeleteQualityMonitorRequest // TODO: short flags @@ -278,7 +278,7 @@ func newDelete() *cobra.Command { deleteReq.TableName = args[0] - err = w.LakehouseMonitors.Delete(ctx, deleteReq) + err = w.QualityMonitors.Delete(ctx, deleteReq) if err != nil { return err } @@ -303,13 +303,13 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var getOverrides []func( *cobra.Command, - *catalog.GetLakehouseMonitorRequest, + *catalog.GetQualityMonitorRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq catalog.GetLakehouseMonitorRequest + var getReq catalog.GetQualityMonitorRequest // TODO: short flags @@ -347,7 +347,7 @@ func newGet() *cobra.Command { getReq.TableName = args[0] - response, err := w.LakehouseMonitors.Get(ctx, getReq) + response, err := w.QualityMonitors.Get(ctx, getReq) if err != nil { return err } @@ -416,7 +416,7 @@ func newGetRefresh() *cobra.Command { getRefreshReq.TableName = args[0] getRefreshReq.RefreshId = args[1] - response, err := w.LakehouseMonitors.GetRefresh(ctx, getRefreshReq) + response, err := w.QualityMonitors.GetRefresh(ctx, getRefreshReq) if err != nil { return err } @@ -484,7 +484,7 @@ func newListRefreshes() *cobra.Command { listRefreshesReq.TableName = args[0] - response, err := w.LakehouseMonitors.ListRefreshes(ctx, listRefreshesReq) + response, err := w.QualityMonitors.ListRefreshes(ctx, listRefreshesReq) if err != nil { return err } @@ -552,7 +552,7 @@ func newRunRefresh() *cobra.Command { runRefreshReq.TableName = args[0] - response, err := w.LakehouseMonitors.RunRefresh(ctx, runRefreshReq) + response, err := w.QualityMonitors.RunRefresh(ctx, runRefreshReq) if err != nil { return err } @@ -591,6 +591,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.BaselineTableName, "baseline-table-name", updateReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`) // TODO: array: custom_metrics + cmd.Flags().StringVar(&updateReq.DashboardId, "dashboard-id", updateReq.DashboardId, `Id of dashboard that visualizes the computed metrics.`) // TODO: complex arg: data_classification_config // TODO: complex arg: inference_log // TODO: complex arg: notifications @@ -651,7 +652,7 @@ func newUpdate() *cobra.Command { updateReq.OutputSchemaName = args[1] } - response, err := w.LakehouseMonitors.Update(ctx, updateReq) + response, err := w.QualityMonitors.Update(ctx, updateReq) if err != nil { return err } @@ -670,4 +671,4 @@ func newUpdate() *cobra.Command { return cmd } -// end service LakehouseMonitors +// end service QualityMonitors diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index dee341ab4..b92f824d3 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -152,6 +152,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: rate_limits + cmd.Flags().BoolVar(&createReq.RouteOptimized, "route-optimized", createReq.RouteOptimized, `Enable route optimization for the serving endpoint.`) // TODO: array: tags cmd.Use = "create" @@ -303,11 +304,12 @@ func newExportMetrics() *cobra.Command { exportMetricsReq.Name = args[0] - err = w.ServingEndpoints.ExportMetrics(ctx, exportMetricsReq) + response, err := w.ServingEndpoints.ExportMetrics(ctx, exportMetricsReq) if err != nil { return err } - return nil + defer response.Contents.Close() + return cmdio.Render(ctx, response.Contents) } // Disable completions since they are not applicable. 
diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index 0e3523cec..c2fd779a7 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -67,6 +67,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) + cmd.Flags().StringVar(&createReq.StorageRoot, "storage-root", createReq.StorageRoot, `Storage root URL for the share.`) cmd.Use = "create NAME" cmd.Short = `Create a share.` @@ -368,6 +369,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the share.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of share.`) + cmd.Flags().StringVar(&updateReq.StorageRoot, "storage-root", updateReq.StorageRoot, `Storage root URL for the share.`) // TODO: array: updates cmd.Use = "update NAME" @@ -382,6 +384,9 @@ func newUpdate() *cobra.Command { In the case that the share name is changed, **updateShare** requires that the caller is both the share owner and a metastore admin. + If there are notebook files in the share, the __storage_root__ field cannot be + updated. + For each table that is added through this method, the share owner must also have **SELECT** privilege on the table. This privilege must be maintained indefinitely for recipients to be able to access the table. Typically, you diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go index 070701d2f..3fe0580d7 100755 --- a/cmd/workspace/system-schemas/system-schemas.go +++ b/cmd/workspace/system-schemas/system-schemas.go @@ -3,8 +3,6 @@ package system_schemas import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/databricks-sdk-go/service/catalog" @@ -81,10 +79,7 @@ func newDisable() *cobra.Command { w := root.WorkspaceClient(ctx) disableReq.MetastoreId = args[0] - _, err = fmt.Sscan(args[1], &disableReq.SchemaName) - if err != nil { - return fmt.Errorf("invalid SCHEMA_NAME: %s", args[1]) - } + disableReq.SchemaName = args[1] err = w.SystemSchemas.Disable(ctx, disableReq) if err != nil { @@ -145,10 +140,7 @@ func newEnable() *cobra.Command { w := root.WorkspaceClient(ctx) enableReq.MetastoreId = args[0] - _, err = fmt.Sscan(args[1], &enableReq.SchemaName) - if err != nil { - return fmt.Errorf("invalid SCHEMA_NAME: %s", args[1]) - } + enableReq.SchemaName = args[1] err = w.SystemSchemas.Enable(ctx, enableReq) if err != nil { diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 32e023d44..dff8176ea 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -42,6 +42,7 @@ func New() *cobra.Command { cmd.AddCommand(newGetIndex()) cmd.AddCommand(newListIndexes()) cmd.AddCommand(newQueryIndex()) + cmd.AddCommand(newScanIndex()) cmd.AddCommand(newSyncIndex()) cmd.AddCommand(newUpsertDataVectorIndex()) @@ -468,6 +469,76 @@ func newQueryIndex() *cobra.Command { return cmd } +// start scan-index command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var scanIndexOverrides []func( + *cobra.Command, + *vectorsearch.ScanVectorIndexRequest, +) + +func newScanIndex() *cobra.Command { + cmd := &cobra.Command{} + + var scanIndexReq vectorsearch.ScanVectorIndexRequest + var scanIndexJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&scanIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&scanIndexReq.LastPrimaryKey, "last-primary-key", scanIndexReq.LastPrimaryKey, `Primary key of the last entry returned in the previous scan.`) + cmd.Flags().IntVar(&scanIndexReq.NumResults, "num-results", scanIndexReq.NumResults, `Number of results to return.`) + + cmd.Use = "scan-index INDEX_NAME" + cmd.Short = `Scan an index.` + cmd.Long = `Scan an index. + + Scan the specified vector index and return the first num_results entries + after the exclusive primary_key. + + Arguments: + INDEX_NAME: Name of the vector index to scan.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = scanIndexJson.Unmarshal(&scanIndexReq) + if err != nil { + return err + } + } + scanIndexReq.IndexName = args[0] + + response, err := w.VectorSearchIndexes.ScanIndex(ctx, scanIndexReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range scanIndexOverrides { + fn(cmd, &scanIndexReq) + } + + return cmd +} + // start sync-index command // Slice with functions to override default command behavior. 
diff --git a/go.mod b/go.mod index ddebe9727..84933de2b 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.40.1 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.41.0 // Apache 2.0 github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 1dccbb2f9..16f9c9a1f 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.40.1 h1:rE5yP9gIW2oap+6CnumixnZSDIsXwVojAuDBuKUl5GU= -github.com/databricks/databricks-sdk-go v0.40.1/go.mod h1:rLIhh7DvifVLmf2QxMr/vMRGqdrTZazn8VYo4LilfCo= +github.com/databricks/databricks-sdk-go v0.41.0 h1:OyhYY+Q6+gqkWeXmpGEiacoU2RStTeWPF0x4vmqbQdc= +github.com/databricks/databricks-sdk-go v0.41.0/go.mod h1:rLIhh7DvifVLmf2QxMr/vMRGqdrTZazn8VYo4LilfCo= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 46f6cbcfc37d6dac836b027f5d60d824d7fdd16e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 22 May 2024 11:08:27 +0200 Subject: [PATCH 199/286] Release v0.220.0 (#1446) CLI: * Add line about Docker installation to README.md ([#1363](https://github.com/databricks/cli/pull/1363)). * Improve token refresh flow ([#1434](https://github.com/databricks/cli/pull/1434)). Bundles: * Upgrade Terraform provider to v1.42.0 ([#1418](https://github.com/databricks/cli/pull/1418)). * Upgrade Terraform provider to v1.43.0 ([#1429](https://github.com/databricks/cli/pull/1429)). * Don't merge-in remote resources during deployments ([#1432](https://github.com/databricks/cli/pull/1432)). * Remove dependency on `ConfigFilePath` from path translation mutator ([#1437](https://github.com/databricks/cli/pull/1437)). * Add `merge.Override` transform ([#1428](https://github.com/databricks/cli/pull/1428)). * Fixed panic when loading incorrectly defined jobs ([#1402](https://github.com/databricks/cli/pull/1402)). * Add more tests for `merge.Override` ([#1439](https://github.com/databricks/cli/pull/1439)). * Fixed seg fault when specifying environment key for tasks ([#1443](https://github.com/databricks/cli/pull/1443)). * Fix conversion of zero valued scalar pointers to a dynamic value ([#1433](https://github.com/databricks/cli/pull/1433)). Internal: * Don't hide commands of services that are already hidden ([#1438](https://github.com/databricks/cli/pull/1438)). API Changes: * Renamed `lakehouse-monitors` command group to `quality-monitors`. * Added `apps` command group. * Renamed `csp-enablement` command group to `compliance-security-profile`. * Renamed `esm-enablement` command group to `enhanced-security-monitoring`. * Added `databricks vector-search-indexes scan-index` command. 
OpenAPI commit 7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92 (2024-05-21) Dependency updates: * Bump golang.org/x/text from 0.14.0 to 0.15.0 ([#1419](https://github.com/databricks/cli/pull/1419)). * Bump golang.org/x/oauth2 from 0.19.0 to 0.20.0 ([#1421](https://github.com/databricks/cli/pull/1421)). * Bump golang.org/x/term from 0.19.0 to 0.20.0 ([#1422](https://github.com/databricks/cli/pull/1422)). * Bump github.com/databricks/databricks-sdk-go from 0.39.0 to 0.40.1 ([#1431](https://github.com/databricks/cli/pull/1431)). * Bump github.com/fatih/color from 1.16.0 to 1.17.0 ([#1441](https://github.com/databricks/cli/pull/1441)). * Bump github.com/hashicorp/terraform-json from 0.21.0 to 0.22.1 ([#1440](https://github.com/databricks/cli/pull/1440)). * Bump github.com/hashicorp/terraform-exec from 0.20.0 to 0.21.0 ([#1442](https://github.com/databricks/cli/pull/1442)). * Update Go SDK to v0.41.0 ([#1445](https://github.com/databricks/cli/pull/1445)). --- CHANGELOG.md | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bd824daf..2fb35d479 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,44 @@ # Version changelog +## 0.220.0 + +CLI: + * Add line about Docker installation to README.md ([#1363](https://github.com/databricks/cli/pull/1363)). + * Improve token refresh flow ([#1434](https://github.com/databricks/cli/pull/1434)). + +Bundles: + * Upgrade Terraform provider to v1.42.0 ([#1418](https://github.com/databricks/cli/pull/1418)). + * Upgrade Terraform provider to v1.43.0 ([#1429](https://github.com/databricks/cli/pull/1429)). + * Don't merge-in remote resources during deployments ([#1432](https://github.com/databricks/cli/pull/1432)). + * Remove dependency on `ConfigFilePath` from path translation mutator ([#1437](https://github.com/databricks/cli/pull/1437)). + * Add `merge.Override` transform ([#1428](https://github.com/databricks/cli/pull/1428)). + * Fixed panic when loading incorrectly defined jobs ([#1402](https://github.com/databricks/cli/pull/1402)). + * Add more tests for `merge.Override` ([#1439](https://github.com/databricks/cli/pull/1439)). + * Fixed seg fault when specifying environment key for tasks ([#1443](https://github.com/databricks/cli/pull/1443)). + * Fix conversion of zero valued scalar pointers to a dynamic value ([#1433](https://github.com/databricks/cli/pull/1433)). + +Internal: + * Don't hide commands of services that are already hidden ([#1438](https://github.com/databricks/cli/pull/1438)). + +API Changes: + * Renamed `lakehouse-monitors` command group to `quality-monitors`. + * Added `apps` command group. + * Renamed `csp-enablement` command group to `compliance-security-profile`. + * Renamed `esm-enablement` command group to `enhanced-security-monitoring`. + * Added `databricks vector-search-indexes scan-index` command. + +OpenAPI commit 7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92 (2024-05-21) + +Dependency updates: + * Bump golang.org/x/text from 0.14.0 to 0.15.0 ([#1419](https://github.com/databricks/cli/pull/1419)). + * Bump golang.org/x/oauth2 from 0.19.0 to 0.20.0 ([#1421](https://github.com/databricks/cli/pull/1421)). + * Bump golang.org/x/term from 0.19.0 to 0.20.0 ([#1422](https://github.com/databricks/cli/pull/1422)). + * Bump github.com/databricks/databricks-sdk-go from 0.39.0 to 0.40.1 ([#1431](https://github.com/databricks/cli/pull/1431)). + * Bump github.com/fatih/color from 1.16.0 to 1.17.0 ([#1441](https://github.com/databricks/cli/pull/1441)). 
+ * Bump github.com/hashicorp/terraform-json from 0.21.0 to 0.22.1 ([#1440](https://github.com/databricks/cli/pull/1440)). + * Bump github.com/hashicorp/terraform-exec from 0.20.0 to 0.21.0 ([#1442](https://github.com/databricks/cli/pull/1442)). + * Update Go SDK to v0.41.0 ([#1445](https://github.com/databricks/cli/pull/1445)). + ## 0.219.0 Bundles: From 9a452f38ee4654e0f5d96905f9b4dae40e547a96 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 May 2024 14:20:11 +0200 Subject: [PATCH 200/286] Bump github.com/hashicorp/go-version from 1.6.0 to 1.7.0 (#1454) Bumps [github.com/hashicorp/go-version](https://github.com/hashicorp/go-version) from 1.6.0 to 1.7.0.
Release notes

Sourced from github.com/hashicorp/go-version's releases.

v1.7.0

ENHANCEMENTS:

  • Remove reflect dependency (#91)
  • Implement the database/sql.Scanner and database/sql/driver.Value interfaces for Version (#133)

INTERNAL:

  • [COMPLIANCE] Add Copyright and License Headers (#115)
  • [COMPLIANCE] Update MPL-2.0 LICENSE (#105)
  • Bump actions/cache from 3.0.11 to 3.2.5 (#116)
  • Bump actions/checkout from 3.2.0 to 3.3.0 (#111)
  • Bump actions/upload-artifact from 3.1.1 to 3.1.2 (#112)
  • GHA Migration (#103)
  • github: Pin external GitHub Actions to hashes (#107)
  • SEC-090: Automated trusted workflow pinning (2023-04-05) (#124)
  • update readme (#104)
Changelog

Sourced from github.com/hashicorp/go-version's changelog.

1.7.0 (May 24, 2024)

ENHANCEMENTS:

  • Remove reflect dependency (#91)
  • Implement the database/sql.Scanner and database/sql/driver.Value interfaces for Version (#133)

INTERNAL:

  • [COMPLIANCE] Add Copyright and License Headers (#115)
  • [COMPLIANCE] Update MPL-2.0 LICENSE (#105)
  • Bump actions/cache from 3.0.11 to 3.2.5 (#116)
  • Bump actions/checkout from 3.2.0 to 3.3.0 (#111)
  • Bump actions/upload-artifact from 3.1.1 to 3.1.2 (#112)
  • GHA Migration (#103)
  • github: Pin external GitHub Actions to hashes (#107)
  • SEC-090: Automated trusted workflow pinning (2023-04-05) (#124)
  • update readme (#104)
Commits
  • fcaa532 Update CHANGELOG.md
  • b85381a Update CHANGELOG.md
  • d55f214 Implement the Scan and driver.Value SQL interfaces (#133)
  • e04a866 remove reflection dependency (#91)
  • 94bab9e [COMPLIANCE] Add Copyright and License Headers (#115)
  • 73ddc63 github: Change Dependabot to only manage HashiCorp-owned Actions
  • bf1144e SEC-090: Automated trusted workflow pinning (2023-04-05) (#124)
  • 644291d Bump actions/cache from 3.0.11 to 3.2.5 (#116)
  • 8f6487b Bump actions/upload-artifact from 3.1.1 to 3.1.2 (#112)
  • 7f856b8 Bump actions/checkout from 3.2.0 to 3.3.0 (#111)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/hashicorp/go-version&package-manager=go_modules&previous-version=1.6.0&new-version=1.7.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 84933de2b..8ccbeffc0 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause - github.com/hashicorp/go-version v1.6.0 // MPL 2.0 + github.com/hashicorp/go-version v1.7.0 // MPL 2.0 github.com/hashicorp/hc-install v0.6.4 // MPL 2.0 github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0 diff --git a/go.sum b/go.sum index 16f9c9a1f..71bd69bd6 100644 --- a/go.sum +++ b/go.sum @@ -97,8 +97,8 @@ github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUh github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= From 13b937cea818b536ea5b740e4fff410bfe8d769d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 May 2024 14:41:20 +0200 Subject: [PATCH 201/286] Bump github.com/hashicorp/hc-install from 0.6.4 to 0.7.0 (#1453) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/hc-install](https://github.com/hashicorp/hc-install) from 0.6.4 to 0.7.0.
Release notes

Sourced from github.com/hashicorp/hc-install's releases.

v0.7.0

ENHANCEMENTS:

BUG FIXES:

DEPENDENCIES:

INTERNAL:

New Contributors

Full Changelog: https://github.com/hashicorp/hc-install/compare/v0.6.4...v0.7.0

Commits
  • 152a3b6 Release v0.7.0
  • 237ac6f Ensure license files are tracked during installation so they can be removed (...
  • 5a74938 github: Create CODEOWNERS (#210)
  • 40acb8c build(deps): bump the github-actions-breaking group with 2 updates (#211)
  • b19d1fc build(deps): bump hashicorp/setup-copywrite from 1.1.2 to 1.1.3 in the github...
  • e094597 Result of tsccr-helper -log-level=info gha update -latest . (#209)
  • b5c313e build(deps): bump hashicorp/action-setup-bob (#208)
  • 35884ef github: Set up Dependabot to manage HashiCorp-owned Actions versioning (#207)
  • 704a29e Add support for custom download URLs (#203)
  • 7de7b37 Ensure license file gets packaged along w/ the CLI binary (#205)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/hashicorp/hc-install&package-manager=go_modules&previous-version=0.6.4&new-version=0.7.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8ccbeffc0..1b6c9aeb3 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.7.0 // MPL 2.0 - github.com/hashicorp/hc-install v0.6.4 // MPL 2.0 + github.com/hashicorp/hc-install v0.7.0 // MPL 2.0 github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0 github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 71bd69bd6..723057ad9 100644 --- a/go.sum +++ b/go.sum @@ -99,8 +99,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= -github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= +github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= From b2ea9dd97134dfca601e05ce15c043afef3844b6 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 29 May 2024 17:30:26 +0200 Subject: [PATCH 202/286] Remove unnecessary `filepath.FromSlash` calls (#1458) ## Changes The prior join call calls `filepath.Join` which returns a cleaned result. Path cleaning, in turn, calls `filepath.FromSlash`. ## Tests * Unit tests. --- libs/filer/local_client.go | 6 ------ libs/filer/local_root_path.go | 1 - 2 files changed, 7 deletions(-) diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 958b6277d..9398958f5 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -34,7 +34,6 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, flags |= os.O_EXCL } - absPath = filepath.FromSlash(absPath) f, err := os.OpenFile(absPath, flags, 0644) if os.IsNotExist(err) && slices.Contains(mode, CreateParentDirectories) { // Create parent directories if they don't exist. @@ -76,7 +75,6 @@ func (w *LocalClient) Read(ctx context.Context, name string) (io.ReadCloser, err // This stat call serves two purposes: // 1. Checks file at path exists, and throws an error if it does not // 2. Allows us to error out if the path is a directory - absPath = filepath.FromSlash(absPath) stat, err := os.Stat(absPath) if err != nil { if os.IsNotExist(err) { @@ -103,7 +101,6 @@ func (w *LocalClient) Delete(ctx context.Context, name string, mode ...DeleteMod return CannotDeleteRootError{} } - absPath = filepath.FromSlash(absPath) err = os.Remove(absPath) // Return early on success. 
@@ -131,7 +128,6 @@ func (w *LocalClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, return nil, err } - absPath = filepath.FromSlash(absPath) stat, err := os.Stat(absPath) if err != nil { if os.IsNotExist(err) { @@ -153,7 +149,6 @@ func (w *LocalClient) Mkdir(ctx context.Context, name string) error { return err } - dirPath = filepath.FromSlash(dirPath) return os.MkdirAll(dirPath, 0755) } @@ -163,7 +158,6 @@ func (w *LocalClient) Stat(ctx context.Context, name string) (fs.FileInfo, error return nil, err } - absPath = filepath.FromSlash(absPath) stat, err := os.Stat(absPath) if os.IsNotExist(err) { return nil, FileDoesNotExistError{path: absPath} diff --git a/libs/filer/local_root_path.go b/libs/filer/local_root_path.go index 15a542631..3f8843093 100644 --- a/libs/filer/local_root_path.go +++ b/libs/filer/local_root_path.go @@ -19,7 +19,6 @@ func NewLocalRootPath(root string) localRootPath { func (rp *localRootPath) Join(name string) (string, error) { absPath := filepath.Join(rp.rootPath, name) - if !strings.HasPrefix(absPath, rp.rootPath) { return "", fmt.Errorf("relative path escapes root: %s", name) } From 424499ec1d56db0ef153b68357bb49257c2fe6aa Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 30 May 2024 09:41:50 +0200 Subject: [PATCH 203/286] Abstract over filesystem interaction with libs/vfs (#1452) ## Changes Introduce `libs/vfs` for an implementation of `fs.FS` and friends that _includes_ the absolute path it is anchored to. This is needed for: 1. Intercepting file operations to inject custom logic (e.g., logging, access control). 2. Traversing directories to find specific leaf directories (e.g., `.git`). 3. Converting virtual paths to OS-native paths. Options 2 and 3 are not possible with the standard `fs.FS` interface. They are needed such that we can provide an instance to the sync package and still detect the containing `.git` directory and convert paths to native paths. This change focuses on making the following packages use `vfs.Path`: * libs/fileset * libs/git * libs/sync All entries returned by `fileset.All` are now slash-separated. This has 2 consequences: * The sync snapshot now always uses slash-separated paths * We don't need to call `filepath.FromSlash` as much as we did ## Tests * All unit tests pass * All integration tests pass * Manually confirmed that a deployment made on Windows by a previous version of the CLI can be deployed by a new version of the CLI while retaining the validity of the local sync snapshot as well as the remote deployment state. 
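To make the shape of the new API concrete, here is a minimal sketch (not part of the patch) of how a caller might anchor a `vfs.Path` at a local directory and pass it to the packages touched by this change. `vfs.MustNew`, `fileset.New`, `git.NewRepository`, and `Native()` all appear in the diffs below; the directory path, error handling, and printing are illustrative assumptions.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/fileset"
	"github.com/databricks/cli/libs/git"
	"github.com/databricks/cli/libs/vfs"
)

func main() {
	// Anchor a vfs.Path at a local directory. The path remembers the
	// absolute OS location it was created from, so it can later be
	// converted back to a native path via Native().
	root := vfs.MustNew("/tmp/my-bundle") // hypothetical directory

	// Packages that previously took a string root now take a vfs.Path.
	files := fileset.New(root)
	repo, err := git.NewRepository(root)
	if err != nil {
		fmt.Println("not a git repository:", err)
	}

	fmt.Println(files, repo, root.Native())
}
```

Because the path carries its own anchor, callers no longer need to thread a separate root string through the fileset, git, and sync packages.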
--- bundle/bundle.go | 3 +- bundle/config/mutator/load_git_details.go | 3 +- .../config/validate/validate_sync_patterns.go | 3 +- bundle/deploy/files/sync.go | 3 +- bundle/deploy/state.go | 13 ++- bundle/deploy/state_test.go | 23 ++---- cmd/sync/sync.go | 3 +- cmd/sync/sync_test.go | 11 ++- internal/sync_test.go | 6 +- libs/fileset/file.go | 44 ++++++---- libs/fileset/file_test.go | 15 ++-- libs/fileset/fileset.go | 36 ++++---- libs/fileset/glob.go | 19 ++--- libs/fileset/glob_test.go | 82 ++++++------------- libs/git/config.go | 9 +- libs/git/fileset.go | 7 +- libs/git/fileset_test.go | 18 ++-- libs/git/ignore.go | 25 ++++-- libs/git/ignore_test.go | 7 +- libs/git/reference.go | 13 ++- libs/git/reference_test.go | 9 +- libs/git/repository.go | 49 +++++------ libs/git/repository_test.go | 17 ++-- libs/git/view.go | 35 ++++---- libs/git/view_test.go | 29 +++---- libs/notebook/detect.go | 21 +++-- libs/notebook/detect_jupyter.go | 20 +++-- libs/sync/diff.go | 5 +- libs/sync/dirset.go | 5 +- libs/sync/snapshot.go | 5 ++ libs/sync/snapshot_state.go | 32 +++++++- libs/sync/snapshot_state_test.go | 39 +++++++-- libs/sync/snapshot_test.go | 13 +-- libs/sync/sync.go | 6 +- libs/sync/sync_test.go | 23 +++--- libs/sync/watchdog.go | 4 +- libs/vfs/leaf.go | 29 +++++++ libs/vfs/leaf_test.go | 38 +++++++++ libs/vfs/os.go | 82 +++++++++++++++++++ libs/vfs/os_test.go | 54 ++++++++++++ libs/vfs/path.go | 29 +++++++ libs/vfs/path_test.go | 1 + 42 files changed, 603 insertions(+), 285 deletions(-) create mode 100644 libs/vfs/leaf.go create mode 100644 libs/vfs/leaf_test.go create mode 100644 libs/vfs/os.go create mode 100644 libs/vfs/os_test.go create mode 100644 libs/vfs/path.go create mode 100644 libs/vfs/path_test.go diff --git a/bundle/bundle.go b/bundle/bundle.go index 977ca2247..1dc98656a 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -22,6 +22,7 @@ import ( "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/tags" "github.com/databricks/cli/libs/terraform" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" sdkconfig "github.com/databricks/databricks-sdk-go/config" "github.com/hashicorp/terraform-exec/tfexec" @@ -208,7 +209,7 @@ func (b *Bundle) GitRepository() (*git.Repository, error) { return nil, fmt.Errorf("unable to locate repository root: %w", err) } - return git.NewRepository(rootPath) + return git.NewRepository(vfs.MustNew(rootPath)) } // AuthEnv returns a map with environment variables and their values diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 7ce8476f1..d8b76f39e 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/vfs" ) type loadGitDetails struct{} @@ -22,7 +23,7 @@ func (m *loadGitDetails) Name() string { func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Load relevant git repository - repo, err := git.NewRepository(b.RootPath) + repo, err := git.NewRepository(vfs.MustNew(b.RootPath)) if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go index 58acf6ae4..832efede9 100644 --- a/bundle/config/validate/validate_sync_patterns.go +++ b/bundle/config/validate/validate_sync_patterns.go @@ -8,6 +8,7 @@ import ( 
"github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" "golang.org/x/sync/errgroup" ) @@ -50,7 +51,7 @@ func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (di index := i p := pattern errs.Go(func() error { - fs, err := fileset.NewGlobSet(rb.RootPath(), []string{p}) + fs, err := fileset.NewGlobSet(vfs.MustNew(rb.RootPath()), []string{p}) if err != nil { return err } diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index d78ab2d74..8d6efdae3 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/sync" + "github.com/databricks/cli/libs/vfs" ) func GetSync(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.Sync, error) { @@ -28,7 +29,7 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp } opts := &sync.SyncOptions{ - LocalPath: rb.RootPath(), + LocalPath: vfs.MustNew(rb.RootPath()), RemotePath: rb.Config().Workspace.FilePath, Include: includes, Exclude: rb.Config().Sync.Exclude, diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go index ffcadc9d6..ccff64fe7 100644 --- a/bundle/deploy/state.go +++ b/bundle/deploy/state.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" ) const DeploymentStateFileName = "deployment.json" @@ -112,12 +113,18 @@ func FromSlice(files []fileset.File) (Filelist, error) { func (f Filelist) ToSlice(basePath string) []fileset.File { var files []fileset.File + root := vfs.MustNew(basePath) for _, file := range f { - absPath := filepath.Join(basePath, file.LocalPath) + entry := newEntry(filepath.Join(basePath, file.LocalPath)) + + // Snapshots created with versions <= v0.220.0 use platform-specific + // paths (i.e. with backslashes). Files returned by [libs/fileset] always + // contain forward slashes after this version. Normalize before using. 
+ relative := filepath.ToSlash(file.LocalPath) if file.IsNotebook { - files = append(files, fileset.NewNotebookFile(newEntry(absPath), absPath, file.LocalPath)) + files = append(files, fileset.NewNotebookFile(root, entry, relative)) } else { - files = append(files, fileset.NewSourceFile(newEntry(absPath), absPath, file.LocalPath)) + files = append(files, fileset.NewSourceFile(root, entry, relative)) } } return files diff --git a/bundle/deploy/state_test.go b/bundle/deploy/state_test.go index 15bdc96b4..efa051ab6 100644 --- a/bundle/deploy/state_test.go +++ b/bundle/deploy/state_test.go @@ -3,17 +3,17 @@ package deploy import ( "bytes" "encoding/json" - "path/filepath" "testing" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/require" ) func TestFromSlice(t *testing.T) { tmpDir := t.TempDir() - fileset := fileset.New(tmpDir) + fileset := fileset.New(vfs.MustNew(tmpDir)) testutil.Touch(t, tmpDir, "test1.py") testutil.Touch(t, tmpDir, "test2.py") testutil.Touch(t, tmpDir, "test3.py") @@ -32,7 +32,7 @@ func TestFromSlice(t *testing.T) { func TestToSlice(t *testing.T) { tmpDir := t.TempDir() - fileset := fileset.New(tmpDir) + fileset := fileset.New(vfs.MustNew(tmpDir)) testutil.Touch(t, tmpDir, "test1.py") testutil.Touch(t, tmpDir, "test2.py") testutil.Touch(t, tmpDir, "test3.py") @@ -48,18 +48,11 @@ func TestToSlice(t *testing.T) { require.Len(t, s, 3) for _, file := range s { - require.Contains(t, []string{"test1.py", "test2.py", "test3.py"}, file.Name()) - require.Contains(t, []string{ - filepath.Join(tmpDir, "test1.py"), - filepath.Join(tmpDir, "test2.py"), - filepath.Join(tmpDir, "test3.py"), - }, file.Absolute) - require.False(t, file.IsDir()) - require.NotZero(t, file.Type()) - info, err := file.Info() - require.NoError(t, err) - require.NotNil(t, info) - require.Equal(t, file.Name(), info.Name()) + require.Contains(t, []string{"test1.py", "test2.py", "test3.py"}, file.Relative) + + // If the mtime is not zero we know we produced a valid fs.DirEntry. 
+ ts := file.Modified() + require.NotZero(t, ts) } } diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index 42550722b..e5f1bfc9e 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/sync" + "github.com/databricks/cli/libs/vfs" "github.com/spf13/cobra" ) @@ -46,7 +47,7 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn } opts := sync.SyncOptions{ - LocalPath: args[0], + LocalPath: vfs.MustNew(args[0]), RemotePath: args[1], Full: f.full, PollInterval: f.interval, diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index 026d840f7..b741e7b16 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -31,7 +31,7 @@ func TestSyncOptionsFromBundle(t *testing.T) { f := syncFlags{} opts, err := f.syncOptionsFromBundle(New(), []string{}, b) require.NoError(t, err) - assert.Equal(t, tempDir, opts.LocalPath) + assert.Equal(t, tempDir, opts.LocalPath.Native()) assert.Equal(t, "/Users/jane@doe.com/path", opts.RemotePath) assert.Equal(t, filepath.Join(tempDir, ".databricks", "bundle", "default"), opts.SnapshotBasePath) assert.NotNil(t, opts.WorkspaceClient) @@ -49,11 +49,14 @@ func TestSyncOptionsFromArgsRequiredTwoArgs(t *testing.T) { } func TestSyncOptionsFromArgs(t *testing.T) { + local := t.TempDir() + remote := "/remote" + f := syncFlags{} cmd := New() cmd.SetContext(root.SetWorkspaceClient(context.Background(), nil)) - opts, err := f.syncOptionsFromArgs(cmd, []string{"/local", "/remote"}) + opts, err := f.syncOptionsFromArgs(cmd, []string{local, remote}) require.NoError(t, err) - assert.Equal(t, "/local", opts.LocalPath) - assert.Equal(t, "/remote", opts.RemotePath) + assert.Equal(t, local, opts.LocalPath.Native()) + assert.Equal(t, remote, opts.RemotePath) } diff --git a/internal/sync_test.go b/internal/sync_test.go index f970a7ce0..4021e6490 100644 --- a/internal/sync_test.go +++ b/internal/sync_test.go @@ -313,7 +313,7 @@ func TestAccSyncNestedFolderSync(t *testing.T) { assertSync.remoteDirContent(ctx, "dir1", []string{"dir2"}) assertSync.remoteDirContent(ctx, "dir1/dir2", []string{"dir3"}) assertSync.remoteDirContent(ctx, "dir1/dir2/dir3", []string{"foo.txt"}) - assertSync.snapshotContains(append(repoFiles, ".gitignore", filepath.FromSlash("dir1/dir2/dir3/foo.txt"))) + assertSync.snapshotContains(append(repoFiles, ".gitignore", "dir1/dir2/dir3/foo.txt")) // delete f.Remove(t) @@ -374,7 +374,7 @@ func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { assertSync.remoteDirContent(ctx, "dir1", []string{"a b+c"}) assertSync.remoteDirContent(ctx, "dir1/a b+c", []string{"c+d e"}) assertSync.remoteDirContent(ctx, "dir1/a b+c/c+d e", []string{"e+f g#i.txt"}) - assertSync.snapshotContains(append(repoFiles, ".gitignore", filepath.FromSlash("dir1/a b+c/c+d e/e+f g#i.txt"))) + assertSync.snapshotContains(append(repoFiles, ".gitignore", "dir1/a b+c/c+d e/e+f g#i.txt")) // delete f.Remove(t) @@ -404,7 +404,7 @@ func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) { assertSync.waitForCompletionMarker() assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo")) assertSync.remoteDirContent(ctx, "foo", []string{"bar.txt"}) - assertSync.snapshotContains(append(repoFiles, ".gitignore", filepath.FromSlash("foo/bar.txt"))) + assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo/bar.txt")) // delete foo/bar.txt f.Remove(t) diff --git a/libs/fileset/file.go b/libs/fileset/file.go index 
17cae7952..fd846b257 100644 --- a/libs/fileset/file.go +++ b/libs/fileset/file.go @@ -5,6 +5,7 @@ import ( "time" "github.com/databricks/cli/libs/notebook" + "github.com/databricks/cli/libs/vfs" ) type fileType int @@ -16,40 +17,49 @@ const ( ) type File struct { - fs.DirEntry - Absolute, Relative string - fileType fileType + // Root path of the fileset. + root vfs.Path + + // File entry as returned by the [fs.WalkDir] function. + entry fs.DirEntry + + // Type of the file. + fileType fileType + + // Relative path within the fileset. + // Combine with the [vfs.Path] to interact with the underlying file. + Relative string } -func NewNotebookFile(entry fs.DirEntry, absolute string, relative string) File { +func NewNotebookFile(root vfs.Path, entry fs.DirEntry, relative string) File { return File{ - DirEntry: entry, - Absolute: absolute, - Relative: relative, + root: root, + entry: entry, fileType: Notebook, + Relative: relative, } } -func NewFile(entry fs.DirEntry, absolute string, relative string) File { +func NewFile(root vfs.Path, entry fs.DirEntry, relative string) File { return File{ - DirEntry: entry, - Absolute: absolute, - Relative: relative, + root: root, + entry: entry, fileType: Unknown, + Relative: relative, } } -func NewSourceFile(entry fs.DirEntry, absolute string, relative string) File { +func NewSourceFile(root vfs.Path, entry fs.DirEntry, relative string) File { return File{ - DirEntry: entry, - Absolute: absolute, - Relative: relative, + root: root, + entry: entry, fileType: Source, + Relative: relative, } } func (f File) Modified() (ts time.Time) { - info, err := f.Info() + info, err := f.entry.Info() if err != nil { // return default time, beginning of epoch return ts @@ -63,7 +73,7 @@ func (f *File) IsNotebook() (bool, error) { } // Otherwise, detect the notebook type. 
- isNotebook, _, err := notebook.Detect(f.Absolute) + isNotebook, _, err := notebook.DetectWithFS(f.root, f.Relative) if err != nil { return false, err } diff --git a/libs/fileset/file_test.go b/libs/fileset/file_test.go index cdfc9ba17..1ce1ff59a 100644 --- a/libs/fileset/file_test.go +++ b/libs/fileset/file_test.go @@ -1,22 +1,22 @@ package fileset import ( - "path/filepath" "testing" "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/require" ) func TestNotebookFileIsNotebook(t *testing.T) { - f := NewNotebookFile(nil, "", "") + f := NewNotebookFile(nil, nil, "") isNotebook, err := f.IsNotebook() require.NoError(t, err) require.True(t, isNotebook) } func TestSourceFileIsNotNotebook(t *testing.T) { - f := NewSourceFile(nil, "", "") + f := NewSourceFile(nil, nil, "") isNotebook, err := f.IsNotebook() require.NoError(t, err) require.False(t, isNotebook) @@ -24,18 +24,19 @@ func TestSourceFileIsNotNotebook(t *testing.T) { func TestUnknownFileDetectsNotebook(t *testing.T) { tmpDir := t.TempDir() + root := vfs.MustNew(tmpDir) t.Run("file", func(t *testing.T) { - path := testutil.Touch(t, tmpDir, "test.py") - f := NewFile(nil, path, filepath.Base(path)) + testutil.Touch(t, tmpDir, "test.py") + f := NewFile(root, nil, "test.py") isNotebook, err := f.IsNotebook() require.NoError(t, err) require.False(t, isNotebook) }) t.Run("notebook", func(t *testing.T) { - path := testutil.TouchNotebook(t, tmpDir, "notebook.py") - f := NewFile(nil, path, filepath.Base(path)) + testutil.TouchNotebook(t, tmpDir, "notebook.py") + f := NewFile(root, nil, "notebook.py") isNotebook, err := f.IsNotebook() require.NoError(t, err) require.True(t, isNotebook) diff --git a/libs/fileset/fileset.go b/libs/fileset/fileset.go index 52463dff3..d0f00f97a 100644 --- a/libs/fileset/fileset.go +++ b/libs/fileset/fileset.go @@ -4,20 +4,24 @@ import ( "fmt" "io/fs" "os" - "path/filepath" + + "github.com/databricks/cli/libs/vfs" ) // FileSet facilitates fast recursive file listing of a path. // It optionally takes into account ignore rules through the [Ignorer] interface. type FileSet struct { - root string + // Root path of the fileset. + root vfs.Path + + // Ignorer interface to check if a file or directory should be ignored. ignore Ignorer } // New returns a [FileSet] for the given root path. -func New(root string) *FileSet { +func New(root vfs.Path) *FileSet { return &FileSet{ - root: filepath.Clean(root), + root: root, ignore: nopIgnorer{}, } } @@ -32,11 +36,6 @@ func (w *FileSet) SetIgnorer(ignore Ignorer) { w.ignore = ignore } -// Return root for fileset. 
-func (w *FileSet) Root() string { - return w.root -} - // Return all tracked files for Repo func (w *FileSet) All() ([]File, error) { return w.recursiveListFiles() @@ -46,12 +45,7 @@ func (w *FileSet) All() ([]File, error) { // that are being tracked in the FileSet (ie not being ignored for matching one of the // patterns in w.ignore) func (w *FileSet) recursiveListFiles() (fileList []File, err error) { - err = filepath.WalkDir(w.root, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - - relPath, err := filepath.Rel(w.root, path) + err = fs.WalkDir(w.root, ".", func(name string, d fs.DirEntry, err error) error { if err != nil { return err } @@ -66,25 +60,25 @@ func (w *FileSet) recursiveListFiles() (fileList []File, err error) { } if d.IsDir() { - ign, err := w.ignore.IgnoreDirectory(relPath) + ign, err := w.ignore.IgnoreDirectory(name) if err != nil { - return fmt.Errorf("cannot check if %s should be ignored: %w", relPath, err) + return fmt.Errorf("cannot check if %s should be ignored: %w", name, err) } if ign { - return filepath.SkipDir + return fs.SkipDir } return nil } - ign, err := w.ignore.IgnoreFile(relPath) + ign, err := w.ignore.IgnoreFile(name) if err != nil { - return fmt.Errorf("cannot check if %s should be ignored: %w", relPath, err) + return fmt.Errorf("cannot check if %s should be ignored: %w", name, err) } if ign { return nil } - fileList = append(fileList, NewFile(d, path, relPath)) + fileList = append(fileList, NewFile(w.root, d, name)) return nil }) return diff --git a/libs/fileset/glob.go b/libs/fileset/glob.go index 9d8626e54..0a1038472 100644 --- a/libs/fileset/glob.go +++ b/libs/fileset/glob.go @@ -1,22 +1,17 @@ package fileset import ( - "path/filepath" + "path" + + "github.com/databricks/cli/libs/vfs" ) -func NewGlobSet(root string, includes []string) (*FileSet, error) { - absRoot, err := filepath.Abs(root) - if err != nil { - return nil, err - } - +func NewGlobSet(root vfs.Path, includes []string) (*FileSet, error) { for k := range includes { - includes[k] = filepath.ToSlash(filepath.Clean(includes[k])) + includes[k] = path.Clean(includes[k]) } - fs := &FileSet{ - absRoot, - newIncluder(includes), - } + fs := New(root) + fs.SetIgnorer(newIncluder(includes)) return fs, nil } diff --git a/libs/fileset/glob_test.go b/libs/fileset/glob_test.go index e8d3696c4..70b9c444b 100644 --- a/libs/fileset/glob_test.go +++ b/libs/fileset/glob_test.go @@ -2,21 +2,26 @@ package fileset import ( "io/fs" - "os" - "path/filepath" + "path" "slices" "strings" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/require" ) -func TestGlobFileset(t *testing.T) { - cwd, err := os.Getwd() - require.NoError(t, err) - root := filepath.Join(cwd, "..", "filer") +func collectRelativePaths(files []File) []string { + relativePaths := make([]string, 0) + for _, f := range files { + relativePaths = append(relativePaths, f.Relative) + } + return relativePaths +} - entries, err := os.ReadDir(root) +func TestGlobFileset(t *testing.T) { + root := vfs.MustNew("../filer") + entries, err := root.ReadDir(".") require.NoError(t, err) g, err := NewGlobSet(root, []string{ @@ -30,7 +35,7 @@ func TestGlobFileset(t *testing.T) { require.Equal(t, len(files), len(entries)) for _, f := range files { exists := slices.ContainsFunc(entries, func(de fs.DirEntry) bool { - return de.Name() == f.Name() + return de.Name() == path.Base(f.Relative) }) require.True(t, exists) } @@ -46,9 +51,8 @@ func TestGlobFileset(t *testing.T) { } func 
TestGlobFilesetWithRelativeRoot(t *testing.T) { - root := filepath.Join("..", "filer") - - entries, err := os.ReadDir(root) + root := vfs.MustNew("../filer") + entries, err := root.ReadDir(".") require.NoError(t, err) g, err := NewGlobSet(root, []string{ @@ -58,21 +62,14 @@ func TestGlobFilesetWithRelativeRoot(t *testing.T) { files, err := g.All() require.NoError(t, err) - require.Equal(t, len(files), len(entries)) - for _, f := range files { - require.True(t, filepath.IsAbs(f.Absolute)) - } } func TestGlobFilesetRecursively(t *testing.T) { - cwd, err := os.Getwd() - require.NoError(t, err) - root := filepath.Join(cwd, "..", "git") - + root := vfs.MustNew("../git") entries := make([]string, 0) - err = filepath.Walk(filepath.Join(root, "testdata"), func(path string, info fs.FileInfo, err error) error { - if !info.IsDir() { + err := fs.WalkDir(root, "testdata", func(path string, d fs.DirEntry, err error) error { + if !d.IsDir() { entries = append(entries, path) } return nil @@ -86,24 +83,14 @@ func TestGlobFilesetRecursively(t *testing.T) { files, err := g.All() require.NoError(t, err) - - require.Equal(t, len(files), len(entries)) - for _, f := range files { - exists := slices.ContainsFunc(entries, func(path string) bool { - return path == f.Absolute - }) - require.True(t, exists) - } + require.ElementsMatch(t, entries, collectRelativePaths(files)) } func TestGlobFilesetDir(t *testing.T) { - cwd, err := os.Getwd() - require.NoError(t, err) - root := filepath.Join(cwd, "..", "git") - + root := vfs.MustNew("../git") entries := make([]string, 0) - err = filepath.Walk(filepath.Join(root, "testdata", "a"), func(path string, info fs.FileInfo, err error) error { - if !info.IsDir() { + err := fs.WalkDir(root, "testdata/a", func(path string, d fs.DirEntry, err error) error { + if !d.IsDir() { entries = append(entries, path) } return nil @@ -117,23 +104,13 @@ func TestGlobFilesetDir(t *testing.T) { files, err := g.All() require.NoError(t, err) - - require.Equal(t, len(files), len(entries)) - for _, f := range files { - exists := slices.ContainsFunc(entries, func(path string) bool { - return path == f.Absolute - }) - require.True(t, exists) - } + require.ElementsMatch(t, entries, collectRelativePaths(files)) } func TestGlobFilesetDoubleQuotesWithFilePatterns(t *testing.T) { - cwd, err := os.Getwd() - require.NoError(t, err) - root := filepath.Join(cwd, "..", "git") - + root := vfs.MustNew("../git") entries := make([]string, 0) - err = filepath.Walk(filepath.Join(root, "testdata"), func(path string, info fs.FileInfo, err error) error { + err := fs.WalkDir(root, "testdata", func(path string, d fs.DirEntry, err error) error { if strings.HasSuffix(path, ".txt") { entries = append(entries, path) } @@ -148,12 +125,5 @@ func TestGlobFilesetDoubleQuotesWithFilePatterns(t *testing.T) { files, err := g.All() require.NoError(t, err) - - require.Equal(t, len(files), len(entries)) - for _, f := range files { - exists := slices.ContainsFunc(entries, func(path string) bool { - return path == f.Absolute - }) - require.True(t, exists) - } + require.ElementsMatch(t, entries, collectRelativePaths(files)) } diff --git a/libs/git/config.go b/libs/git/config.go index e83c75b7b..424d453bc 100644 --- a/libs/git/config.go +++ b/libs/git/config.go @@ -8,6 +8,7 @@ import ( "regexp" "strings" + "github.com/databricks/cli/libs/vfs" "gopkg.in/ini.v1" ) @@ -87,8 +88,8 @@ func (c config) load(r io.Reader) error { return nil } -func (c config) loadFile(path string) error { - f, err := os.Open(path) +func (c config) loadFile(fs 
vfs.Path, path string) error { + f, err := fs.Open(path) if err != nil { // If the file doesn't exist it is ignored. // This is the case for both global and repository specific config files. @@ -152,8 +153,8 @@ func globalGitConfig() (*config, error) { // > are missing or unreadable they will be ignored. // // We therefore ignore the error return value for the calls below. - config.loadFile(filepath.Join(xdgConfigHome, "git/config")) - config.loadFile(filepath.Join(config.home, ".gitconfig")) + config.loadFile(vfs.MustNew(xdgConfigHome), "git/config") + config.loadFile(vfs.MustNew(config.home), ".gitconfig") return config, nil } diff --git a/libs/git/fileset.go b/libs/git/fileset.go index c604ac7fa..f1986aa20 100644 --- a/libs/git/fileset.go +++ b/libs/git/fileset.go @@ -2,6 +2,7 @@ package git import ( "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" ) // FileSet is Git repository aware implementation of [fileset.FileSet]. @@ -13,7 +14,7 @@ type FileSet struct { } // NewFileSet returns [FileSet] for the Git repository located at `root`. -func NewFileSet(root string) (*FileSet, error) { +func NewFileSet(root vfs.Path) (*FileSet, error) { fs := fileset.New(root) v, err := NewView(root) if err != nil { @@ -34,10 +35,6 @@ func (f *FileSet) IgnoreDirectory(dir string) (bool, error) { return f.view.IgnoreDirectory(dir) } -func (f *FileSet) Root() string { - return f.fileset.Root() -} - func (f *FileSet) All() ([]fileset.File, error) { f.view.repo.taintIgnoreRules() return f.fileset.All() diff --git a/libs/git/fileset_test.go b/libs/git/fileset_test.go index 74133f525..4e6172bfd 100644 --- a/libs/git/fileset_test.go +++ b/libs/git/fileset_test.go @@ -2,23 +2,25 @@ package git import ( "os" + "path" "path/filepath" "strings" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func testFileSetAll(t *testing.T, path string) { - fileSet, err := NewFileSet(path) +func testFileSetAll(t *testing.T, root string) { + fileSet, err := NewFileSet(vfs.MustNew(root)) require.NoError(t, err) files, err := fileSet.All() require.NoError(t, err) require.Len(t, files, 3) - assert.Equal(t, filepath.Join("a", "b", "world.txt"), files[0].Relative) - assert.Equal(t, filepath.Join("a", "hello.txt"), files[1].Relative) - assert.Equal(t, filepath.Join("databricks.yml"), files[2].Relative) + assert.Equal(t, path.Join("a", "b", "world.txt"), files[0].Relative) + assert.Equal(t, path.Join("a", "hello.txt"), files[1].Relative) + assert.Equal(t, path.Join("databricks.yml"), files[2].Relative) } func TestFileSetListAllInRepo(t *testing.T) { @@ -33,7 +35,7 @@ func TestFileSetNonCleanRoot(t *testing.T) { // Test what happens if the root directory can be simplified. // Path simplification is done by most filepath functions. // This should yield the same result as above test. 
- fileSet, err := NewFileSet("./testdata/../testdata") + fileSet, err := NewFileSet(vfs.MustNew("./testdata/../testdata")) require.NoError(t, err) files, err := fileSet.All() require.NoError(t, err) @@ -42,7 +44,7 @@ func TestFileSetNonCleanRoot(t *testing.T) { func TestFileSetAddsCacheDirToGitIgnore(t *testing.T) { projectDir := t.TempDir() - fileSet, err := NewFileSet(projectDir) + fileSet, err := NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) fileSet.EnsureValidGitIgnoreExists() @@ -57,7 +59,7 @@ func TestFileSetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) { projectDir := t.TempDir() gitIgnorePath := filepath.Join(projectDir, ".gitignore") - fileSet, err := NewFileSet(projectDir) + fileSet, err := NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) err = os.WriteFile(gitIgnorePath, []byte(".databricks"), 0o644) require.NoError(t, err) diff --git a/libs/git/ignore.go b/libs/git/ignore.go index ec66a2b23..df3a4e919 100644 --- a/libs/git/ignore.go +++ b/libs/git/ignore.go @@ -1,9 +1,12 @@ package git import ( + "io/fs" "os" + "strings" "time" + "github.com/databricks/cli/libs/vfs" ignore "github.com/sabhiram/go-gitignore" ) @@ -21,7 +24,8 @@ type ignoreRules interface { // ignoreFile represents a gitignore file backed by a path. // If the path doesn't exist (yet), it is treated as an empty file. type ignoreFile struct { - absPath string + root vfs.Path + path string // Signal a reload of this file. // Set this to call [os.Stat] and a potential reload @@ -35,9 +39,10 @@ type ignoreFile struct { patterns *ignore.GitIgnore } -func newIgnoreFile(absPath string) ignoreRules { +func newIgnoreFile(root vfs.Path, path string) ignoreRules { return &ignoreFile{ - absPath: absPath, + root: root, + path: path, checkForReload: true, } } @@ -67,7 +72,7 @@ func (f *ignoreFile) Taint() { func (f *ignoreFile) load() error { // The file must be stat-able. // If it doesn't exist, treat it as an empty file. - stat, err := os.Stat(f.absPath) + stat, err := fs.Stat(f.root, f.path) if err != nil { if os.IsNotExist(err) { return nil @@ -82,7 +87,7 @@ func (f *ignoreFile) load() error { } f.modTime = stat.ModTime() - f.patterns, err = ignore.CompileIgnoreFile(f.absPath) + f.patterns, err = f.loadGitignore() if err != nil { return err } @@ -90,6 +95,16 @@ func (f *ignoreFile) load() error { return nil } +func (f *ignoreFile) loadGitignore() (*ignore.GitIgnore, error) { + data, err := fs.ReadFile(f.root, f.path) + if err != nil { + return nil, err + } + + lines := strings.Split(string(data), "\n") + return ignore.CompileIgnoreLines(lines...), nil +} + // stringIgnoreRules implements the [ignoreRules] interface // for a set of in-memory ignore patterns. type stringIgnoreRules struct { diff --git a/libs/git/ignore_test.go b/libs/git/ignore_test.go index 160f53d7b..057c0cb2e 100644 --- a/libs/git/ignore_test.go +++ b/libs/git/ignore_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -13,7 +14,7 @@ func TestIgnoreFile(t *testing.T) { var ign bool var err error - f := newIgnoreFile("./testdata/.gitignore") + f := newIgnoreFile(vfs.MustNew("testdata"), ".gitignore") ign, err = f.MatchesPath("root.foo") require.NoError(t, err) assert.True(t, ign) @@ -27,7 +28,7 @@ func TestIgnoreFileDoesntExist(t *testing.T) { var err error // Files that don't exist are treated as an empty gitignore file. 
- f := newIgnoreFile("./testdata/thispathdoesntexist") + f := newIgnoreFile(vfs.MustNew("testdata"), "thispathdoesntexist") ign, err = f.MatchesPath("i'm included") require.NoError(t, err) assert.False(t, ign) @@ -41,7 +42,7 @@ func TestIgnoreFileTaint(t *testing.T) { gitIgnorePath := filepath.Join(tempDir, ".gitignore") // Files that don't exist are treated as an empty gitignore file. - f := newIgnoreFile(gitIgnorePath) + f := newIgnoreFile(vfs.MustNew(tempDir), ".gitignore") ign, err = f.MatchesPath("hello") require.NoError(t, err) assert.False(t, ign) diff --git a/libs/git/reference.go b/libs/git/reference.go index 4021f2e60..2b4bd3e4d 100644 --- a/libs/git/reference.go +++ b/libs/git/reference.go @@ -2,10 +2,12 @@ package git import ( "fmt" + "io/fs" "os" - "path/filepath" "regexp" "strings" + + "github.com/databricks/cli/libs/vfs" ) type ReferenceType string @@ -37,9 +39,9 @@ func isSHA1(s string) bool { return re.MatchString(s) } -func LoadReferenceFile(path string) (*Reference, error) { +func LoadReferenceFile(root vfs.Path, path string) (*Reference, error) { // read reference file content - b, err := os.ReadFile(path) + b, err := fs.ReadFile(root, path) if os.IsNotExist(err) { return nil, nil } @@ -73,8 +75,7 @@ func (ref *Reference) ResolvePath() (string, error) { if ref.Type != ReferenceTypePointer { return "", ErrNotAReferencePointer } - refPath := strings.TrimPrefix(ref.Content, ReferencePrefix) - return filepath.FromSlash(refPath), nil + return strings.TrimPrefix(ref.Content, ReferencePrefix), nil } // resolves the name of the current branch from the reference file content. For example @@ -87,8 +88,6 @@ func (ref *Reference) CurrentBranch() (string, error) { if err != nil { return "", err } - // normalize branch ref path to work accross different operating systems - branchRefPath = filepath.ToSlash(branchRefPath) if !strings.HasPrefix(branchRefPath, HeadPathPrefix) { return "", fmt.Errorf("reference path %s does not have expected prefix %s", branchRefPath, HeadPathPrefix) } diff --git a/libs/git/reference_test.go b/libs/git/reference_test.go index 1b08e989b..194d79333 100644 --- a/libs/git/reference_test.go +++ b/libs/git/reference_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -45,7 +46,7 @@ func TestReferenceReferencePathForReference(t *testing.T) { } path, err := ref.ResolvePath() assert.NoError(t, err) - assert.Equal(t, filepath.FromSlash("refs/heads/my-branch"), path) + assert.Equal(t, "refs/heads/my-branch", path) } func TestReferenceLoadingForObjectID(t *testing.T) { @@ -55,7 +56,7 @@ func TestReferenceLoadingForObjectID(t *testing.T) { defer f.Close() f.WriteString(strings.Repeat("e", 40) + "\r\n") - ref, err := LoadReferenceFile(filepath.Join(tmp, "HEAD")) + ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) assert.Equal(t, ReferenceTypeSHA1, ref.Type) assert.Equal(t, strings.Repeat("e", 40), ref.Content) @@ -68,7 +69,7 @@ func TestReferenceLoadingForReference(t *testing.T) { defer f.Close() f.WriteString("ref: refs/heads/foo\n") - ref, err := LoadReferenceFile(filepath.Join(tmp, "HEAD")) + ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) assert.Equal(t, ReferenceTypePointer, ref.Type) assert.Equal(t, "ref: refs/heads/foo", ref.Content) @@ -81,7 +82,7 @@ func TestReferenceLoadingFailsForInvalidContent(t *testing.T) { defer f.Close() f.WriteString("abc") - _, err = 
LoadReferenceFile(filepath.Join(tmp, "HEAD")) + _, err = LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.ErrorContains(t, err, "unknown format for git HEAD") } diff --git a/libs/git/repository.go b/libs/git/repository.go index 531fd74e4..6baf26c2e 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -7,7 +7,7 @@ import ( "path/filepath" "strings" - "github.com/databricks/cli/libs/folders" + "github.com/databricks/cli/libs/vfs" ) const gitIgnoreFileName = ".gitignore" @@ -21,8 +21,8 @@ type Repository struct { // directory where we process .gitignore files. real bool - // rootPath is the absolute path to the repository root. - rootPath string + // root is the absolute path to the repository root. + root vfs.Path // ignore contains a list of ignore patterns indexed by the // path prefix relative to the repository root. @@ -42,12 +42,12 @@ type Repository struct { // Root returns the absolute path to the repository root. func (r *Repository) Root() string { - return r.rootPath + return r.root.Native() } func (r *Repository) CurrentBranch() (string, error) { // load .git/HEAD - ref, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, "HEAD")) + ref, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, "HEAD")) if err != nil { return "", err } @@ -64,7 +64,7 @@ func (r *Repository) CurrentBranch() (string, error) { func (r *Repository) LatestCommit() (string, error) { // load .git/HEAD - ref, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, "HEAD")) + ref, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, "HEAD")) if err != nil { return "", err } @@ -83,7 +83,7 @@ func (r *Repository) LatestCommit() (string, error) { if err != nil { return "", err } - branchHeadRef, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, branchHeadPath)) + branchHeadRef, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, branchHeadPath)) if err != nil { return "", err } @@ -108,7 +108,7 @@ func (r *Repository) loadConfig() error { if err != nil { return fmt.Errorf("unable to load user specific gitconfig: %w", err) } - err = config.loadFile(filepath.Join(r.rootPath, ".git/config")) + err = config.loadFile(r.root, ".git/config") if err != nil { return fmt.Errorf("unable to load repository specific gitconfig: %w", err) } @@ -119,7 +119,7 @@ func (r *Repository) loadConfig() error { // newIgnoreFile constructs a new [ignoreRules] implementation backed by // a file using the specified path relative to the repository root. func (r *Repository) newIgnoreFile(relativeIgnoreFilePath string) ignoreRules { - return newIgnoreFile(filepath.Join(r.rootPath, relativeIgnoreFilePath)) + return newIgnoreFile(r.root, relativeIgnoreFilePath) } // getIgnoreRules returns a slice of [ignoreRules] that apply @@ -132,7 +132,7 @@ func (r *Repository) getIgnoreRules(prefix string) []ignoreRules { return fs } - r.ignore[prefix] = append(r.ignore[prefix], r.newIgnoreFile(filepath.Join(prefix, gitIgnoreFileName))) + r.ignore[prefix] = append(r.ignore[prefix], r.newIgnoreFile(path.Join(prefix, gitIgnoreFileName))) return r.ignore[prefix] } @@ -149,7 +149,7 @@ func (r *Repository) taintIgnoreRules() { // Ignore computes whether to ignore the specified path. // The specified path is relative to the repository root path. func (r *Repository) Ignore(relPath string) (bool, error) { - parts := strings.Split(filepath.ToSlash(relPath), "/") + parts := strings.Split(relPath, "/") // Retain trailing slash for directory patterns. 
// We know a trailing slash was present if the last element @@ -186,14 +186,9 @@ func (r *Repository) Ignore(relPath string) (bool, error) { return false, nil } -func NewRepository(path string) (*Repository, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - +func NewRepository(path vfs.Path) (*Repository, error) { real := true - rootPath, err := folders.FindDirWithLeaf(path, GitDirectoryName) + rootPath, err := vfs.FindLeafInTree(path, GitDirectoryName) if err != nil { if !os.IsNotExist(err) { return nil, err @@ -205,9 +200,9 @@ func NewRepository(path string) (*Repository, error) { } repo := &Repository{ - real: real, - rootPath: rootPath, - ignore: make(map[string][]ignoreRules), + real: real, + root: rootPath, + ignore: make(map[string][]ignoreRules), } err = repo.loadConfig() @@ -221,13 +216,21 @@ func NewRepository(path string) (*Repository, error) { return nil, fmt.Errorf("unable to access core excludes file: %w", err) } + // Load global excludes on this machine. + // This is by definition a local path so we create a new [vfs.Path] instance. + coreExcludes := newStringIgnoreRules([]string{}) + if coreExcludesPath != "" { + dir := filepath.Dir(coreExcludesPath) + base := filepath.Base(coreExcludesPath) + coreExcludes = newIgnoreFile(vfs.MustNew(dir), base) + } + // Initialize root ignore rules. // These are special and not lazily initialized because: // 1) we include a hardcoded ignore pattern // 2) we include a gitignore file at a non-standard path repo.ignore["."] = []ignoreRules{ - // Load global excludes on this machine. - newIgnoreFile(coreExcludesPath), + coreExcludes, // Always ignore root .git directory. newStringIgnoreRules([]string{ ".git", diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index fb0e38080..7ddc7ea79 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -43,7 +44,7 @@ func newTestRepository(t *testing.T) *testRepository { _, err = f2.WriteString(`ref: refs/heads/main`) require.NoError(t, err) - repo, err := NewRepository(tmp) + repo, err := NewRepository(vfs.MustNew(tmp)) require.NoError(t, err) return &testRepository{ @@ -53,7 +54,7 @@ func newTestRepository(t *testing.T) *testRepository { } func (testRepo *testRepository) checkoutCommit(commitId string) { - f, err := os.OpenFile(filepath.Join(testRepo.r.rootPath, ".git", "HEAD"), os.O_WRONLY|os.O_TRUNC, os.ModePerm) + f, err := os.OpenFile(filepath.Join(testRepo.r.Root(), ".git", "HEAD"), os.O_WRONLY|os.O_TRUNC, os.ModePerm) require.NoError(testRepo.t, err) defer f.Close() @@ -63,7 +64,7 @@ func (testRepo *testRepository) checkoutCommit(commitId string) { func (testRepo *testRepository) addBranch(name string, latestCommit string) { // create dir for branch head reference - branchDir := filepath.Join(testRepo.r.rootPath, ".git", "refs", "heads") + branchDir := filepath.Join(testRepo.r.Root(), ".git", "refs", "heads") err := os.MkdirAll(branchDir, os.ModePerm) require.NoError(testRepo.t, err) @@ -78,7 +79,7 @@ func (testRepo *testRepository) addBranch(name string, latestCommit string) { } func (testRepo *testRepository) checkoutBranch(name string) { - f, err := os.OpenFile(filepath.Join(testRepo.r.rootPath, ".git", "HEAD"), os.O_WRONLY|os.O_TRUNC, os.ModePerm) + f, err := os.OpenFile(filepath.Join(testRepo.r.Root(), ".git", "HEAD"), os.O_WRONLY|os.O_TRUNC, os.ModePerm) 
require.NoError(testRepo.t, err) defer f.Close() @@ -89,7 +90,7 @@ func (testRepo *testRepository) checkoutBranch(name string) { // add remote origin url to test repo func (testRepo *testRepository) addOriginUrl(url string) { // open config in append mode - f, err := os.OpenFile(filepath.Join(testRepo.r.rootPath, ".git", "config"), os.O_WRONLY|os.O_APPEND, os.ModePerm) + f, err := os.OpenFile(filepath.Join(testRepo.r.Root(), ".git", "config"), os.O_WRONLY|os.O_APPEND, os.ModePerm) require.NoError(testRepo.t, err) defer f.Close() @@ -128,7 +129,7 @@ func (testRepo *testRepository) assertOriginUrl(expected string) { func TestRepository(t *testing.T) { // Load this repository as test. - repo, err := NewRepository("../..") + repo, err := NewRepository(vfs.MustNew("../..")) tr := testRepository{t, repo} require.NoError(t, err) @@ -142,7 +143,7 @@ func TestRepository(t *testing.T) { assert.True(t, tr.Ignore("vendor/")) // Check that ignores under testdata work. - assert.True(t, tr.Ignore(filepath.Join("libs", "git", "testdata", "root.ignoreme"))) + assert.True(t, tr.Ignore("libs/git/testdata/root.ignoreme")) } func TestRepositoryGitConfigForEmptyRepo(t *testing.T) { @@ -192,7 +193,7 @@ func TestRepositoryGitConfigForSshUrl(t *testing.T) { func TestRepositoryGitConfigWhenNotARepo(t *testing.T) { tmp := t.TempDir() - repo, err := NewRepository(tmp) + repo, err := NewRepository(vfs.MustNew(tmp)) require.NoError(t, err) branch, err := repo.CurrentBranch() diff --git a/libs/git/view.go b/libs/git/view.go index 3cb88d8b1..90eed0bb8 100644 --- a/libs/git/view.go +++ b/libs/git/view.go @@ -1,9 +1,13 @@ package git import ( + "fmt" "os" + "path" "path/filepath" "strings" + + "github.com/databricks/cli/libs/vfs" ) // View represents a view on a directory tree that takes into account @@ -29,17 +33,15 @@ type View struct { // Ignore computes whether to ignore the specified path. // The specified path is relative to the view's target path. -func (v *View) Ignore(path string) (bool, error) { - path = filepath.ToSlash(path) - +func (v *View) Ignore(relPath string) (bool, error) { // Retain trailing slash for directory patterns. // Needs special handling because it is removed by path cleaning. trailingSlash := "" - if strings.HasSuffix(path, "/") { + if strings.HasSuffix(relPath, "/") { trailingSlash = "/" } - return v.repo.Ignore(filepath.Join(v.targetPath, path) + trailingSlash) + return v.repo.Ignore(path.Join(v.targetPath, relPath) + trailingSlash) } // IgnoreFile returns if the gitignore rules in this fileset @@ -70,26 +72,27 @@ func (v *View) IgnoreDirectory(dir string) (bool, error) { return v.Ignore(dir + "/") } -func NewView(path string) (*View, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - - repo, err := NewRepository(path) +func NewView(root vfs.Path) (*View, error) { + repo, err := NewRepository(root) if err != nil { return nil, err } // Target path must be relative to the repository root path. - targetPath, err := filepath.Rel(repo.rootPath, path) - if err != nil { - return nil, err + target := root.Native() + prefix := repo.root.Native() + if !strings.HasPrefix(target, prefix) { + return nil, fmt.Errorf("path %q is not within repository root %q", root.Native(), prefix) } + // Make target a relative path. 
+ target = strings.TrimPrefix(target, prefix) + target = strings.TrimPrefix(target, string(os.PathSeparator)) + target = path.Clean(filepath.ToSlash(target)) + return &View{ repo: repo, - targetPath: targetPath, + targetPath: target, }, nil } diff --git a/libs/git/view_test.go b/libs/git/view_test.go index 3ecd301b5..76fba3458 100644 --- a/libs/git/view_test.go +++ b/libs/git/view_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -89,19 +90,19 @@ func testViewAtRoot(t *testing.T, tv testView) { } func TestViewRootInBricksRepo(t *testing.T) { - v, err := NewView("./testdata") + v, err := NewView(vfs.MustNew("./testdata")) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } func TestViewRootInTempRepo(t *testing.T) { - v, err := NewView(createFakeRepo(t, "testdata")) + v, err := NewView(vfs.MustNew(createFakeRepo(t, "testdata"))) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } func TestViewRootInTempDir(t *testing.T) { - v, err := NewView(copyTestdata(t, "testdata")) + v, err := NewView(vfs.MustNew(copyTestdata(t, "testdata"))) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } @@ -124,20 +125,20 @@ func testViewAtA(t *testing.T, tv testView) { } func TestViewAInBricksRepo(t *testing.T) { - v, err := NewView("./testdata/a") + v, err := NewView(vfs.MustNew("./testdata/a")) require.NoError(t, err) testViewAtA(t, testView{t, v}) } func TestViewAInTempRepo(t *testing.T) { - v, err := NewView(filepath.Join(createFakeRepo(t, "testdata"), "a")) + v, err := NewView(vfs.MustNew(filepath.Join(createFakeRepo(t, "testdata"), "a"))) require.NoError(t, err) testViewAtA(t, testView{t, v}) } func TestViewAInTempDir(t *testing.T) { // Since this is not a fake repo it should not traverse up the tree. - v, err := NewView(filepath.Join(copyTestdata(t, "testdata"), "a")) + v, err := NewView(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a"))) require.NoError(t, err) tv := testView{t, v} @@ -174,20 +175,20 @@ func testViewAtAB(t *testing.T, tv testView) { } func TestViewABInBricksRepo(t *testing.T) { - v, err := NewView("./testdata/a/b") + v, err := NewView(vfs.MustNew("./testdata/a/b")) require.NoError(t, err) testViewAtAB(t, testView{t, v}) } func TestViewABInTempRepo(t *testing.T) { - v, err := NewView(filepath.Join(createFakeRepo(t, "testdata"), "a", "b")) + v, err := NewView(vfs.MustNew(filepath.Join(createFakeRepo(t, "testdata"), "a", "b"))) require.NoError(t, err) testViewAtAB(t, testView{t, v}) } func TestViewABInTempDir(t *testing.T) { // Since this is not a fake repo it should not traverse up the tree. 
- v, err := NewView(filepath.Join(copyTestdata(t, "testdata"), "a", "b")) + v, err := NewView(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a", "b"))) tv := testView{t, v} require.NoError(t, err) @@ -214,7 +215,7 @@ func TestViewDoesNotChangeGitignoreIfCacheDirAlreadyIgnoredAtRoot(t *testing.T) // Since root .gitignore already has .databricks, there should be no edits // to root .gitignore - v, err := NewView(repoPath) + v, err := NewView(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -234,7 +235,7 @@ func TestViewDoesNotChangeGitignoreIfCacheDirAlreadyIgnoredInSubdir(t *testing.T // Since root .gitignore already has .databricks, there should be no edits // to a/.gitignore - v, err := NewView(filepath.Join(repoPath, "a")) + v, err := NewView(vfs.MustNew(filepath.Join(repoPath, "a"))) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -252,7 +253,7 @@ func TestViewAddsGitignoreWithCacheDir(t *testing.T) { assert.NoError(t, err) // Since root .gitignore was deleted, new view adds .databricks to root .gitignore - v, err := NewView(repoPath) + v, err := NewView(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -270,7 +271,7 @@ func TestViewAddsGitignoreWithCacheDirAtSubdir(t *testing.T) { require.NoError(t, err) // Since root .gitignore was deleted, new view adds .databricks to a/.gitignore - v, err := NewView(filepath.Join(repoPath, "a")) + v, err := NewView(vfs.MustNew(filepath.Join(repoPath, "a"))) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -287,7 +288,7 @@ func TestViewAddsGitignoreWithCacheDirAtSubdir(t *testing.T) { func TestViewAlwaysIgnoresCacheDir(t *testing.T) { repoPath := createFakeRepo(t, "testdata") - v, err := NewView(repoPath) + v, err := NewView(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() diff --git a/libs/notebook/detect.go b/libs/notebook/detect.go index 17685f3bf..0b7c04d6d 100644 --- a/libs/notebook/detect.go +++ b/libs/notebook/detect.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "io" + "io/fs" "os" "path/filepath" "strings" @@ -15,8 +16,8 @@ import ( const headerLength = 32 // readHeader reads the first N bytes from a file. -func readHeader(path string) ([]byte, error) { - f, err := os.Open(path) +func readHeader(fsys fs.FS, name string) ([]byte, error) { + f, err := fsys.Open(name) if err != nil { return nil, err } @@ -36,10 +37,10 @@ func readHeader(path string) ([]byte, error) { // Detect returns whether the file at path is a Databricks notebook. // If it is, it returns the notebook language. -func Detect(path string) (notebook bool, language workspace.Language, err error) { +func DetectWithFS(fsys fs.FS, name string) (notebook bool, language workspace.Language, err error) { header := "" - buf, err := readHeader(path) + buf, err := readHeader(fsys, name) if err != nil { return false, "", err } @@ -48,7 +49,7 @@ func Detect(path string) (notebook bool, language workspace.Language, err error) fileHeader := scanner.Text() // Determine which header to expect based on filename extension. 
- ext := strings.ToLower(filepath.Ext(path)) + ext := strings.ToLower(filepath.Ext(name)) switch ext { case ".py": header = `# Databricks notebook source` @@ -63,7 +64,7 @@ func Detect(path string) (notebook bool, language workspace.Language, err error) header = "-- Databricks notebook source" language = workspace.LanguageSql case ".ipynb": - return DetectJupyter(path) + return DetectJupyterWithFS(fsys, name) default: return false, "", nil } @@ -74,3 +75,11 @@ func Detect(path string) (notebook bool, language workspace.Language, err error) return true, language, nil } + +// Detect calls DetectWithFS with the local filesystem. +// The name argument may be a local relative path or a local absolute path. +func Detect(name string) (notebook bool, language workspace.Language, err error) { + d := filepath.ToSlash(filepath.Dir(name)) + b := filepath.Base(name) + return DetectWithFS(os.DirFS(d), b) +} diff --git a/libs/notebook/detect_jupyter.go b/libs/notebook/detect_jupyter.go index 7d96763cd..f631b5812 100644 --- a/libs/notebook/detect_jupyter.go +++ b/libs/notebook/detect_jupyter.go @@ -3,7 +3,9 @@ package notebook import ( "encoding/json" "fmt" + "io/fs" "os" + "path/filepath" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -56,8 +58,8 @@ func resolveLanguage(nb *jupyter) workspace.Language { // DetectJupyter returns whether the file at path is a valid Jupyter notebook. // We assume it is valid if we can read it as JSON and see a couple expected fields. // If we cannot, importing into the workspace will always fail, so we also return an error. -func DetectJupyter(path string) (notebook bool, language workspace.Language, err error) { - f, err := os.Open(path) +func DetectJupyterWithFS(fsys fs.FS, name string) (notebook bool, language workspace.Language, err error) { + f, err := fsys.Open(name) if err != nil { return false, "", err } @@ -68,18 +70,26 @@ func DetectJupyter(path string) (notebook bool, language workspace.Language, err dec := json.NewDecoder(f) err = dec.Decode(&nb) if err != nil { - return false, "", fmt.Errorf("%s: error loading Jupyter notebook file: %w", path, err) + return false, "", fmt.Errorf("%s: error loading Jupyter notebook file: %w", name, err) } // Not a Jupyter notebook if the cells or metadata fields aren't defined. if nb.Cells == nil || nb.Metadata == nil { - return false, "", fmt.Errorf("%s: invalid Jupyter notebook file", path) + return false, "", fmt.Errorf("%s: invalid Jupyter notebook file", name) } // Major version must be at least 4. if nb.NbFormatMajor < 4 { - return false, "", fmt.Errorf("%s: unsupported Jupyter notebook version: %d", path, nb.NbFormatMajor) + return false, "", fmt.Errorf("%s: unsupported Jupyter notebook version: %d", name, nb.NbFormatMajor) } return true, resolveLanguage(&nb), nil } + +// DetectJupyter calls DetectJupyterWithFS with the local filesystem. +// The name argument may be a local relative path or a local absolute path. 
+func DetectJupyter(name string) (notebook bool, language workspace.Language, err error) { + d := filepath.ToSlash(filepath.Dir(name)) + b := filepath.Base(name) + return DetectJupyterWithFS(os.DirFS(d), b) +} diff --git a/libs/sync/diff.go b/libs/sync/diff.go index 074bfc56c..e91f7277e 100644 --- a/libs/sync/diff.go +++ b/libs/sync/diff.go @@ -2,7 +2,6 @@ package sync import ( "path" - "path/filepath" "golang.org/x/exp/maps" ) @@ -64,7 +63,7 @@ func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *Snaps func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { for localName := range after.LastModifiedTimes { if _, ok := before.LastModifiedTimes[localName]; !ok { - d.put = append(d.put, filepath.ToSlash(localName)) + d.put = append(d.put, localName) } } @@ -79,7 +78,7 @@ func (d *diff) addUpdatedFiles(after *SnapshotState, before *SnapshotState) { for localName, modTime := range after.LastModifiedTimes { prevModTime, ok := before.LastModifiedTimes[localName] if ok && modTime.After(prevModTime) { - d.put = append(d.put, filepath.ToSlash(localName)) + d.put = append(d.put, localName) } } } diff --git a/libs/sync/dirset.go b/libs/sync/dirset.go index 3c37c97cf..33b85cb8e 100644 --- a/libs/sync/dirset.go +++ b/libs/sync/dirset.go @@ -2,7 +2,6 @@ package sync import ( "path" - "path/filepath" "sort" ) @@ -16,8 +15,8 @@ func MakeDirSet(files []string) DirSet { // Iterate over all files. for _, f := range files { - // Get the directory of the file in /-separated form. - dir := filepath.ToSlash(filepath.Dir(f)) + // Get the directory of the file. + dir := path.Dir(f) // Add this directory and its parents until it is either "." or already in the set. for dir != "." { diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index a27a8c84f..392e274d4 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -172,6 +172,11 @@ func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error return nil, fmt.Errorf("failed to json unmarshal persisted snapshot: %s", err) } + // Ensure that all paths are slash-separated upon loading + // an existing snapshot file. If it was created by an older + // CLI version (<= v0.220.0), it may contain backslashes. + snapshot.SnapshotState = snapshot.SnapshotState.ToSlash() + snapshot.New = false return snapshot, nil } diff --git a/libs/sync/snapshot_state.go b/libs/sync/snapshot_state.go index 10cd34e6d..09bb5b63e 100644 --- a/libs/sync/snapshot_state.go +++ b/libs/sync/snapshot_state.go @@ -2,6 +2,7 @@ package sync import ( "fmt" + "path" "path/filepath" "strings" "time" @@ -48,7 +49,7 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { for k := range localFiles { f := &localFiles[k] // Compute the remote name the file will have in WSFS - remoteName := filepath.ToSlash(f.Relative) + remoteName := f.Relative isNotebook, err := f.IsNotebook() if err != nil { @@ -57,7 +58,7 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { continue } if isNotebook { - ext := filepath.Ext(remoteName) + ext := path.Ext(remoteName) remoteName = strings.TrimSuffix(remoteName, ext) } @@ -119,3 +120,30 @@ func (fs *SnapshotState) validate() error { } return nil } + +// ToSlash ensures all local paths in the snapshot state +// are slash-separated. Returns a new snapshot state. 
+func (old SnapshotState) ToSlash() *SnapshotState { + new := SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + } + + // Keys are local paths. + for k, v := range old.LastModifiedTimes { + new.LastModifiedTimes[filepath.ToSlash(k)] = v + } + + // Keys are local paths. + for k, v := range old.LocalToRemoteNames { + new.LocalToRemoteNames[filepath.ToSlash(k)] = v + } + + // Values are remote paths. + for k, v := range old.RemoteToLocalNames { + new.RemoteToLocalNames[k] = filepath.ToSlash(v) + } + + return &new +} diff --git a/libs/sync/snapshot_state_test.go b/libs/sync/snapshot_state_test.go index bfcdbef65..92c14e8e0 100644 --- a/libs/sync/snapshot_state_test.go +++ b/libs/sync/snapshot_state_test.go @@ -1,25 +1,27 @@ package sync import ( + "runtime" "testing" "time" "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSnapshotState(t *testing.T) { - fileSet := fileset.New("./testdata/sync-fileset") + fileSet := fileset.New(vfs.MustNew("./testdata/sync-fileset")) files, err := fileSet.All() require.NoError(t, err) // Assert initial contents of the fileset assert.Len(t, files, 4) - assert.Equal(t, "invalid-nb.ipynb", files[0].Name()) - assert.Equal(t, "my-nb.py", files[1].Name()) - assert.Equal(t, "my-script.py", files[2].Name()) - assert.Equal(t, "valid-nb.ipynb", files[3].Name()) + assert.Equal(t, "invalid-nb.ipynb", files[0].Relative) + assert.Equal(t, "my-nb.py", files[1].Relative) + assert.Equal(t, "my-script.py", files[2].Relative) + assert.Equal(t, "valid-nb.ipynb", files[3].Relative) // Assert snapshot state generated from the fileset. Note that the invalid notebook // has been ignored. @@ -114,3 +116,30 @@ func TestSnapshotStateValidationErrors(t *testing.T) { } assert.EqualError(t, s.validate(), "invalid sync state representation. Inconsistent values found. Remote file c points to a. 
Local file a points to b") } + +func TestSnapshotStateWithBackslashes(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("Skipping test on non-Windows platform") + } + + now := time.Now() + s1 := &SnapshotState{ + LastModifiedTimes: map[string]time.Time{ + "foo\\bar.py": now, + }, + LocalToRemoteNames: map[string]string{ + "foo\\bar.py": "foo/bar", + }, + RemoteToLocalNames: map[string]string{ + "foo/bar": "foo\\bar.py", + }, + } + + assert.NoError(t, s1.validate()) + + s2 := s1.ToSlash() + assert.NoError(t, s1.validate()) + assert.Equal(t, map[string]time.Time{"foo/bar.py": now}, s2.LastModifiedTimes) + assert.Equal(t, map[string]string{"foo/bar.py": "foo/bar"}, s2.LocalToRemoteNames) + assert.Equal(t, map[string]string{"foo/bar": "foo/bar.py"}, s2.RemoteToLocalNames) +} diff --git a/libs/sync/snapshot_test.go b/libs/sync/snapshot_test.go index d6358d4a1..050b5d965 100644 --- a/libs/sync/snapshot_test.go +++ b/libs/sync/snapshot_test.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/testfile" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -29,7 +30,7 @@ func TestDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -93,7 +94,7 @@ func TestSymlinkDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -124,7 +125,7 @@ func TestFolderDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -169,7 +170,7 @@ func TestPythonNotebookDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -244,7 +245,7 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -281,7 +282,7 @@ func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 30b68ccf3..585e8a887 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -10,12 +10,13 @@ import ( "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/set" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/iam" ) type SyncOptions struct { - LocalPath string + LocalPath vfs.Path RemotePath string Include []string Exclude []string @@ -54,6 +55,7 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { if 
err != nil { return nil, err } + err = fileSet.EnsureValidGitIgnoreExists() if err != nil { return nil, err @@ -186,7 +188,7 @@ func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { // tradeoff: doing portable monitoring only due to macOS max descriptor manual ulimit setting requirement // https://github.com/gorakhargosh/watchdog/blob/master/src/watchdog/observers/kqueue.py#L394-L418 all := set.NewSetF(func(f fileset.File) string { - return f.Absolute + return f.Relative }) gitFiles, err := s.fileSet.All() if err != nil { diff --git a/libs/sync/sync_test.go b/libs/sync/sync_test.go index dc220dbf7..292586e8d 100644 --- a/libs/sync/sync_test.go +++ b/libs/sync/sync_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/git" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/require" ) @@ -73,16 +74,17 @@ func TestGetFileSet(t *testing.T) { ctx := context.Background() dir := setupFiles(t) - fileSet, err := git.NewFileSet(dir) + root := vfs.MustNew(dir) + fileSet, err := git.NewFileSet(root) require.NoError(t, err) err = fileSet.EnsureValidGitIgnoreExists() require.NoError(t, err) - inc, err := fileset.NewGlobSet(dir, []string{}) + inc, err := fileset.NewGlobSet(root, []string{}) require.NoError(t, err) - excl, err := fileset.NewGlobSet(dir, []string{}) + excl, err := fileset.NewGlobSet(root, []string{}) require.NoError(t, err) s := &Sync{ @@ -97,10 +99,10 @@ func TestGetFileSet(t *testing.T) { require.NoError(t, err) require.Equal(t, len(fileList), 9) - inc, err = fileset.NewGlobSet(dir, []string{}) + inc, err = fileset.NewGlobSet(root, []string{}) require.NoError(t, err) - excl, err = fileset.NewGlobSet(dir, []string{"*.go"}) + excl, err = fileset.NewGlobSet(root, []string{"*.go"}) require.NoError(t, err) s = &Sync{ @@ -115,10 +117,10 @@ func TestGetFileSet(t *testing.T) { require.NoError(t, err) require.Equal(t, len(fileList), 1) - inc, err = fileset.NewGlobSet(dir, []string{".databricks/*"}) + inc, err = fileset.NewGlobSet(root, []string{".databricks/*"}) require.NoError(t, err) - excl, err = fileset.NewGlobSet(dir, []string{}) + excl, err = fileset.NewGlobSet(root, []string{}) require.NoError(t, err) s = &Sync{ @@ -138,16 +140,17 @@ func TestRecursiveExclude(t *testing.T) { ctx := context.Background() dir := setupFiles(t) - fileSet, err := git.NewFileSet(dir) + root := vfs.MustNew(dir) + fileSet, err := git.NewFileSet(root) require.NoError(t, err) err = fileSet.EnsureValidGitIgnoreExists() require.NoError(t, err) - inc, err := fileset.NewGlobSet(dir, []string{}) + inc, err := fileset.NewGlobSet(root, []string{}) require.NoError(t, err) - excl, err := fileset.NewGlobSet(dir, []string{"test/**"}) + excl, err := fileset.NewGlobSet(root, []string{"test/**"}) require.NoError(t, err) s := &Sync{ diff --git a/libs/sync/watchdog.go b/libs/sync/watchdog.go index b0c96e01c..ca7ec46e9 100644 --- a/libs/sync/watchdog.go +++ b/libs/sync/watchdog.go @@ -4,8 +4,6 @@ import ( "context" "errors" "io/fs" - "os" - "path/filepath" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" @@ -59,7 +57,7 @@ func (s *Sync) applyMkdir(ctx context.Context, localName string) error { func (s *Sync) applyPut(ctx context.Context, localName string) error { s.notifyProgress(ctx, EventActionPut, localName, 0.0) - localFile, err := os.Open(filepath.Join(s.LocalPath, localName)) + localFile, err := s.LocalPath.Open(localName) if err != nil { return err } diff --git a/libs/vfs/leaf.go b/libs/vfs/leaf.go new file mode 
100644 index 000000000..8c11f9039 --- /dev/null +++ b/libs/vfs/leaf.go @@ -0,0 +1,29 @@ +package vfs + +import ( + "errors" + "io/fs" +) + +// FindLeafInTree returns the first path that holds `name`, +// traversing up to the root of the filesystem, starting at `p`. +func FindLeafInTree(p Path, name string) (Path, error) { + for p != nil { + _, err := fs.Stat(p, name) + + // No error means we found the leaf in p. + if err == nil { + return p, nil + } + + // ErrNotExist means we continue traversal up the tree. + if errors.Is(err, fs.ErrNotExist) { + p = p.Parent() + continue + } + + return nil, err + } + + return nil, fs.ErrNotExist +} diff --git a/libs/vfs/leaf_test.go b/libs/vfs/leaf_test.go new file mode 100644 index 000000000..da9412ec0 --- /dev/null +++ b/libs/vfs/leaf_test.go @@ -0,0 +1,38 @@ +package vfs + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFindLeafInTree(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + + root := filepath.Join(wd, "..", "..") + + // Find from working directory should work. + { + out, err := FindLeafInTree(MustNew(wd), ".git") + assert.NoError(t, err) + assert.Equal(t, root, out.Native()) + } + + // Find from project root itself should work. + { + out, err := FindLeafInTree(MustNew(root), ".git") + assert.NoError(t, err) + assert.Equal(t, root, out.Native()) + } + + // Find for something that doesn't exist should work. + { + out, err := FindLeafInTree(MustNew(root), "this-leaf-doesnt-exist-anywhere") + assert.ErrorIs(t, err, os.ErrNotExist) + assert.Equal(t, nil, out) + } +} diff --git a/libs/vfs/os.go b/libs/vfs/os.go new file mode 100644 index 000000000..26447d830 --- /dev/null +++ b/libs/vfs/os.go @@ -0,0 +1,82 @@ +package vfs + +import ( + "io/fs" + "os" + "path/filepath" +) + +type osPath struct { + path string + + openFn func(name string) (fs.File, error) + statFn func(name string) (fs.FileInfo, error) + readDirFn func(name string) ([]fs.DirEntry, error) + readFileFn func(name string) ([]byte, error) +} + +func New(name string) (Path, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + + return newOsPath(abs), nil +} + +func MustNew(name string) Path { + p, err := New(name) + if err != nil { + panic(err) + } + + return p +} + +func newOsPath(name string) Path { + if !filepath.IsAbs(name) { + panic("vfs: abs path must be absolute") + } + + // [os.DirFS] implements all required interfaces. + // We used type assertion below to get the underlying types. 
+ dirfs := os.DirFS(name) + + return &osPath{ + path: name, + + openFn: dirfs.Open, + statFn: dirfs.(fs.StatFS).Stat, + readDirFn: dirfs.(fs.ReadDirFS).ReadDir, + readFileFn: dirfs.(fs.ReadFileFS).ReadFile, + } +} + +func (o osPath) Open(name string) (fs.File, error) { + return o.openFn(name) +} + +func (o osPath) Stat(name string) (fs.FileInfo, error) { + return o.statFn(name) +} + +func (o osPath) ReadDir(name string) ([]fs.DirEntry, error) { + return o.readDirFn(name) +} + +func (o osPath) ReadFile(name string) ([]byte, error) { + return o.readFileFn(name) +} + +func (o osPath) Parent() Path { + dir := filepath.Dir(o.path) + if dir == o.path { + return nil + } + + return newOsPath(dir) +} + +func (o osPath) Native() string { + return o.path +} diff --git a/libs/vfs/os_test.go b/libs/vfs/os_test.go new file mode 100644 index 000000000..6199bdc71 --- /dev/null +++ b/libs/vfs/os_test.go @@ -0,0 +1,54 @@ +package vfs + +import ( + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestOsNewWithRelativePath(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + + p, err := New(".") + require.NoError(t, err) + require.Equal(t, wd, p.Native()) +} + +func TestOsPathParent(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + + p := MustNew(wd) + require.NotNil(t, p) + + // Traverse all the way to the root. + for { + q := p.Parent() + if q == nil { + // Parent returns nil when it is the root. + break + } + + p = q + } + + // We should have reached the root. + if runtime.GOOS == "windows" { + require.Equal(t, filepath.VolumeName(wd)+`\`, p.Native()) + } else { + require.Equal(t, "/", p.Native()) + } +} + +func TestOsPathNative(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + + p := MustNew(wd) + require.NotNil(t, p) + require.Equal(t, wd, p.Native()) +} diff --git a/libs/vfs/path.go b/libs/vfs/path.go new file mode 100644 index 000000000..19f119d50 --- /dev/null +++ b/libs/vfs/path.go @@ -0,0 +1,29 @@ +package vfs + +import "io/fs" + +// FS combines the fs.FS, fs.StatFS, fs.ReadDirFS, and fs.ReadFileFS interfaces. +// It mandates that Path implementations must support all these interfaces. +type FS interface { + fs.FS + fs.StatFS + fs.ReadDirFS + fs.ReadFileFS +} + +// Path defines a read-only virtual file system interface for: +// +// 1. Intercepting file operations to inject custom logic (e.g., logging, access control). +// 2. Traversing directories to find specific leaf directories (e.g., .git). +// 3. Converting virtual paths to OS-native paths. +// +// Options 2 and 3 are not possible with the standard fs.FS interface. +// They are needed such that we can provide an instance to the sync package +// and still detect the containing .git directory and convert paths to native paths. +type Path interface { + FS + + Parent() Path + + Native() string +} diff --git a/libs/vfs/path_test.go b/libs/vfs/path_test.go new file mode 100644 index 000000000..54c60940e --- /dev/null +++ b/libs/vfs/path_test.go @@ -0,0 +1 @@ +package vfs From ec33a7c059602d5a0023625d70209b5501dbb2a6 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 30 May 2024 17:29:27 +0530 Subject: [PATCH 204/286] Add `filer.Filer` to read notebooks from WSFS without omitting their extension (#1457) ## Changes This PR adds a filer that'll allow us to read notebooks from the WSFS using their full paths (with the extension included). 
The filer relies on the existing workspace filer (and consequently the workspace import/export/list APIs). Using this filer along with a virtual filesystem layer (https://github.com/databricks/cli/pull/1452/files) will allow us to use our custom implementation (which preserves the notebook extensions) rather than the default mount available via DBR when the CLI is run from DBR. ## Tests Integration tests. --------- Co-authored-by: Pieter Noordhuis --- internal/filer_test.go | 346 ++++++++++++++++++ internal/helpers.go | 11 + .../workspace_files_extensions_client.go | 345 +++++++++++++++++ 3 files changed, 702 insertions(+) create mode 100644 libs/filer/workspace_files_extensions_client.go diff --git a/internal/filer_test.go b/internal/filer_test.go index d333a1b70..3361de5bc 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -3,9 +3,12 @@ package internal import ( "bytes" "context" + "encoding/json" "errors" + "fmt" "io" "io/fs" + "path" "regexp" "strings" "testing" @@ -37,6 +40,36 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str assert.Equal(f, contents, body.String()) } +func (f filerTest) assertContentsJupyter(ctx context.Context, name string) { + reader, err := f.Read(ctx, name) + if !assert.NoError(f, err) { + return + } + + defer reader.Close() + + var body bytes.Buffer + _, err = io.Copy(&body, reader) + if !assert.NoError(f, err) { + return + } + + var actual map[string]any + err = json.Unmarshal(body.Bytes(), &actual) + if !assert.NoError(f, err) { + return + } + + // Since a roundtrip to the workspace changes a Jupyter notebook's payload, + // the best we can do is assert that the nbformat is correct. + assert.EqualValues(f, 4, actual["nbformat"]) +} + +func (f filerTest) assertNotExists(ctx context.Context, name string) { + _, err := f.Stat(ctx, name) + assert.ErrorIs(f, err, fs.ErrNotExist) +} + func commonFilerRecursiveDeleteTest(t *testing.T, ctx context.Context, f filer.Filer) { var err error @@ -94,6 +127,7 @@ func TestAccFilerRecursiveDelete(t *testing.T) { {"workspace files", setupWsfsFiler}, {"dbfs", setupDbfsFiler}, {"files", setupUcVolumesFiler}, + {"workspace files extensions", setupWsfsExtensionsFiler}, } { tc := testCase @@ -204,6 +238,7 @@ func TestAccFilerReadWrite(t *testing.T) { {"workspace files", setupWsfsFiler}, {"dbfs", setupDbfsFiler}, {"files", setupUcVolumesFiler}, + {"workspace files extensions", setupWsfsExtensionsFiler}, } { tc := testCase @@ -312,6 +347,7 @@ func TestAccFilerReadDir(t *testing.T) { {"workspace files", setupWsfsFiler}, {"dbfs", setupDbfsFiler}, {"files", setupUcVolumesFiler}, + {"workspace files extensions", setupWsfsExtensionsFiler}, } { tc := testCase @@ -374,6 +410,8 @@ var jupyterNotebookContent2 = ` ` func TestAccFilerWorkspaceNotebookConflict(t *testing.T) { + t.Parallel() + f, _ := setupWsfsFiler(t) ctx := context.Background() var err error @@ -420,6 +458,8 @@ func TestAccFilerWorkspaceNotebookConflict(t *testing.T) { } func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) { + t.Parallel() + f, _ := setupWsfsFiler(t) ctx := context.Background() var err error @@ -462,3 +502,309 @@ func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) { filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"second upload\"))") filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 2\")") } + +func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { + t.Parallel() + + files 
:= []struct { + name string + content string + }{ + {"dir1/dir2/dir3/file.txt", "file content"}, + {"foo.py", "print('foo')"}, + {"foo.r", "print('foo')"}, + {"foo.scala", "println('foo')"}, + {"foo.sql", "SELECT 'foo'"}, + {"jupyterNb.ipynb", jupyterNotebookContent1}, + {"jupyterNb2.ipynb", jupyterNotebookContent2}, + {"pyNb.py", "# Databricks notebook source\nprint('first upload'))"}, + {"rNb.r", "# Databricks notebook source\nprint('first upload'))"}, + {"scalaNb.scala", "// Databricks notebook source\n println(\"first upload\"))"}, + {"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""}, + } + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + for _, f := range files { + err := wf.Write(ctx, f.name, strings.NewReader(f.content), filer.CreateParentDirectories) + require.NoError(t, err) + } + + // Read entries + entries, err := wf.ReadDir(ctx, ".") + require.NoError(t, err) + assert.Len(t, entries, len(files)) + names := []string{} + for _, e := range entries { + names = append(names, e.Name()) + } + assert.Equal(t, []string{ + "dir1", + "foo.py", + "foo.r", + "foo.scala", + "foo.sql", + "jupyterNb.ipynb", + "jupyterNb2.ipynb", + "pyNb.py", + "rNb.r", + "scalaNb.scala", + "sqlNb.sql", + }, names) +} + +func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { + files := []struct { + name string + content string + }{ + {"foo.py", "# Databricks notebook source\nprint('first upload'))"}, + {"bar.py", "print('foo')"}, + {"jupyter.ipynb", jupyterNotebookContent1}, + {"pretender", "not a notebook"}, + {"dir/file.txt", "file content"}, + {"scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')"}, + } + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + for _, f := range files { + err := wf.Write(ctx, f.name, strings.NewReader(f.content), filer.CreateParentDirectories) + require.NoError(t, err) + } + + return wf +} + +func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf := setupFilerWithExtensionsTest(t) + + // Read contents of test fixtures as a sanity check. 
+ filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('first upload'))") + filerTest{t, wf}.assertContents(ctx, "bar.py", "print('foo')") + filerTest{t, wf}.assertContentsJupyter(ctx, "jupyter.ipynb") + filerTest{t, wf}.assertContents(ctx, "dir/file.txt", "file content") + filerTest{t, wf}.assertContents(ctx, "scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')") + filerTest{t, wf}.assertContents(ctx, "pretender", "not a notebook") + + // Read non-existent file + _, err := wf.Read(ctx, "non-existent.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not read a regular file as a notebook + _, err = wf.Read(ctx, "pretender.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, "pretender.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Read directory + _, err = wf.Read(ctx, "dir") + assert.ErrorIs(t, err, fs.ErrInvalid) + + // Ensure we do not read a Scala notebook as a Python notebook + _, err = wf.Read(ctx, "scala-notebook.py") + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf := setupFilerWithExtensionsTest(t) + + // Delete notebook + err := wf.Delete(ctx, "foo.py") + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, "foo.py") + + // Delete file + err = wf.Delete(ctx, "bar.py") + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, "bar.py") + + // Delete jupyter notebook + err = wf.Delete(ctx, "jupyter.ipynb") + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, "jupyter.ipynb") + + // Delete non-existent file + err = wf.Delete(ctx, "non-existent.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not delete a file as a notebook + err = wf.Delete(ctx, "pretender.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not delete a Scala notebook as a Python notebook + _, err = wf.Read(ctx, "scala-notebook.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Delete directory + err = wf.Delete(ctx, "dir") + assert.ErrorIs(t, err, fs.ErrInvalid) + + // Delete directory recursively + err = wf.Delete(ctx, "dir", filer.DeleteRecursively) + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, "dir") +} + +func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf := setupFilerWithExtensionsTest(t) + + // Stat on a notebook + info, err := wf.Stat(ctx, "foo.py") + require.NoError(t, err) + assert.Equal(t, "foo.py", info.Name()) + assert.False(t, info.IsDir()) + + // Stat on a file + info, err = wf.Stat(ctx, "bar.py") + require.NoError(t, err) + assert.Equal(t, "bar.py", info.Name()) + assert.False(t, info.IsDir()) + + // Stat on a Jupyter notebook + info, err = wf.Stat(ctx, "jupyter.ipynb") + require.NoError(t, err) + assert.Equal(t, "jupyter.ipynb", info.Name()) + assert.False(t, info.IsDir()) + + // Stat on a directory + info, err = wf.Stat(ctx, "dir") + require.NoError(t, err) + assert.Equal(t, "dir", info.Name()) + assert.True(t, info.IsDir()) + + // Stat on a non-existent file + _, err = wf.Stat(ctx, "non-existent.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not stat a file as a notebook + _, err = wf.Stat(ctx, "pretender.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not stat a Scala notebook as a Python notebook + _, err = wf.Stat(ctx, "scala-notebook.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + _, err = 
wf.Stat(ctx, "pretender.ipynb")
+	assert.ErrorIs(t, err, fs.ErrNotExist)
+}
+
+func TestAccFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) {
+	t.Parallel()
+
+	tcases := []struct {
+		files []struct{ name, content string }
+		name  string
+	}{
+		{
+			name: "python",
+			files: []struct{ name, content string }{
+				{"foo.py", "print('foo')"},
+				{"foo.py", "# Databricks notebook source\nprint('foo')"},
+			},
+		},
+		{
+			name: "r",
+			files: []struct{ name, content string }{
+				{"foo.r", "print('foo')"},
+				{"foo.r", "# Databricks notebook source\nprint('foo')"},
+			},
+		},
+		{
+			name: "sql",
+			files: []struct{ name, content string }{
+				{"foo.sql", "SELECT 'foo'"},
+				{"foo.sql", "-- Databricks notebook source\nSELECT 'foo'"},
+			},
+		},
+		{
+			name: "scala",
+			files: []struct{ name, content string }{
+				{"foo.scala", "println('foo')"},
+				{"foo.scala", "// Databricks notebook source\nprintln('foo')"},
+			},
+		},
+		// We don't need to test this for ipynb notebooks. The import API
+		// fails when the file extension is .ipynb but the content is not a
+		// valid Jupyter notebook.
+	}
+
+	for i := range tcases {
+		tc := tcases[i]
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			ctx := context.Background()
+			wf, tmpDir := setupWsfsExtensionsFiler(t)
+
+			for _, f := range tc.files {
+				err := wf.Write(ctx, f.name, strings.NewReader(f.content), filer.CreateParentDirectories)
+				require.NoError(t, err)
+			}
+
+			_, err := wf.ReadDir(ctx, ".")
+			assert.ErrorAs(t, err, &filer.DuplicatePathError{})
+			assert.ErrorContains(t, err, fmt.Sprintf("failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at %s and FILE at %s resolve to the same name %s. Changing the name of one of these objects will resolve this issue", path.Join(tmpDir, "foo"), path.Join(tmpDir, tc.files[0].name), tc.files[0].name))
+		})
+	}
+
+}
+
+func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) {
+	t.Parallel()
+
+	ctx := context.Background()
+	wf, _ := setupWsfsExtensionsFiler(t)
+
+	// Create a directory with an extension
+	err := wf.Mkdir(ctx, "foo")
+	require.NoError(t, err)
+
+	// Reading foo.py should fail. foo is a directory, not a notebook.
+ _, err = wf.Read(ctx, "foo.py") + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + // Case 1: Source Notebook + err := wf.Write(ctx, "foo.py", strings.NewReader("# Databricks notebook source\nprint('foo')")) + require.NoError(t, err) + + // The source notebook should exist but not the Jupyter notebook + filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('foo')") + _, err = wf.Stat(ctx, "foo.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, "foo.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) + err = wf.Delete(ctx, "foo.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Case 2: Jupyter Notebook + err = wf.Write(ctx, "bar.ipynb", strings.NewReader(jupyterNotebookContent1)) + require.NoError(t, err) + + // The Jupyter notebook should exist but not the source notebook + filerTest{t, wf}.assertContentsJupyter(ctx, "bar.ipynb") + _, err = wf.Stat(ctx, "bar.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, "bar.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + err = wf.Delete(ctx, "bar.py") + assert.ErrorIs(t, err, fs.ErrNotExist) +} diff --git a/internal/helpers.go b/internal/helpers.go index 49dc9f4ca..3923e7e1e 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -559,6 +559,17 @@ func setupWsfsFiler(t *testing.T) (filer.Filer, string) { return f, tmpdir } +func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + w := databricks.Must(databricks.NewWorkspaceClient()) + tmpdir := TemporaryWorkspaceDir(t, w) + f, err := filer.NewWorkspaceFilesExtensionsClient(w, tmpdir) + require.NoError(t, err) + + return f, tmpdir +} + func setupDbfsFiler(t *testing.T) (filer.Filer, string) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go new file mode 100644 index 000000000..bad748b10 --- /dev/null +++ b/libs/filer/workspace_files_extensions_client.go @@ -0,0 +1,345 @@ +package filer + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "net/http" + "path" + "strings" + + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/notebook" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/marshal" + "github.com/databricks/databricks-sdk-go/service/workspace" +) + +type workspaceFilesExtensionsClient struct { + workspaceClient *databricks.WorkspaceClient + apiClient *client.DatabricksClient + + wsfs Filer + root string +} + +var extensionsToLanguages = map[string]workspace.Language{ + ".py": workspace.LanguagePython, + ".r": workspace.LanguageR, + ".scala": workspace.LanguageScala, + ".sql": workspace.LanguageSql, + ".ipynb": workspace.LanguagePython, +} + +// workspaceFileStatus defines a custom response body for the "/api/2.0/workspace/get-status" API. +// The "repos_export_format" field is not exposed by the SDK. +type workspaceFileStatus struct { + *workspace.ObjectInfo + + // The export format of the notebook. This is not exposed by the SDK. + ReposExportFormat workspace.ExportFormat `json:"repos_export_format,omitempty"` + + // Name of the file to be used in any API calls made using the workspace files + // filer. 
For notebooks this path does not include the extension.
+	nameForWorkspaceAPI string
+}
+
+// A custom unmarshaller for the workspaceFileStatus struct. This is needed because
+// workspaceFileStatus embeds the workspace.ObjectInfo which itself has a custom
+// unmarshaller.
+// If a custom unmarshaller is not provided, extra fields like ReposExportFormat
+// will not have values set.
+func (s *workspaceFileStatus) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s *workspaceFileStatus) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+func (w *workspaceFilesExtensionsClient) stat(ctx context.Context, name string) (*workspaceFileStatus, error) {
+	stat := &workspaceFileStatus{
+		nameForWorkspaceAPI: name,
+	}
+
+	// Perform bespoke API call because "return_export_info" is not exposed by the SDK.
+	// We need "repos_export_format" to determine if the file is a py or a ipynb notebook.
+	// This is not exposed by the SDK so we need to make a direct API call.
+	err := w.apiClient.Do(
+		ctx,
+		http.MethodGet,
+		"/api/2.0/workspace/get-status",
+		nil,
+		map[string]string{
+			"path": path.Join(w.root, name),
+			"return_export_info": "true",
+		},
+		stat,
+	)
+	if err != nil {
+		// If we got an API error we deal with it below.
+		var aerr *apierr.APIError
+		if !errors.As(err, &aerr) {
+			return nil, err
+		}
+
+		// This API returns a 404 if the specified path does not exist.
+		if aerr.StatusCode == http.StatusNotFound {
+			return nil, FileDoesNotExistError{path.Join(w.root, name)}
+		}
+	}
+	return stat, err
+}
+
+// This function returns the stat for the provided notebook. The stat object itself contains the path
+// with the extension since it is meant to be used in the context of a fs.FileInfo.
+func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx context.Context, name string) (*workspaceFileStatus, error) {
+	ext := path.Ext(name)
+	nameWithoutExt := strings.TrimSuffix(name, ext)
+
+	// File name does not have an extension associated with Databricks notebooks, return early.
+	if _, ok := extensionsToLanguages[ext]; !ok {
+		return nil, nil
+	}
+
+	// If the file could be a notebook, check if it is and has the correct language.
+	stat, err := w.stat(ctx, nameWithoutExt)
+	if err != nil {
+		// If the file does not exist, return early.
+		if errors.As(err, &FileDoesNotExistError{}) {
+			return nil, nil
+		}
+		log.Debugf(ctx, "attempting to determine if %s could be a notebook. Failed to fetch the status of object at %s: %s", name, path.Join(w.root, nameWithoutExt), err)
+		return nil, err
+	}
+
+	// Not a notebook. Return early.
+	if stat.ObjectType != workspace.ObjectTypeNotebook {
+		log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found an object at %s but it is not a notebook. It is a %s.", name, path.Join(w.root, nameWithoutExt), stat.ObjectType)
+		return nil, nil
+	}
+
+	// Not the correct language. Return early.
+	if stat.Language != extensionsToLanguages[ext] {
+		log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not of the correct language. Expected %s but found %s.", name, path.Join(w.root, nameWithoutExt), extensionsToLanguages[ext], stat.Language)
+		return nil, nil
+	}
+
+	// When the extension is .py we expect the export format to be source.
+	// If it's not, return early.
+	if ext == ".py" && stat.ReposExportFormat != workspace.ExportFormatSource {
		log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not exported as a source notebook. Its export format is %s.", name, path.Join(w.root, nameWithoutExt), stat.ReposExportFormat)
+		return nil, nil
+	}
+
+	// When the extension is .ipynb we expect the export format to be Jupyter.
+	// If it's not, return early.
+	if ext == ".ipynb" && stat.ReposExportFormat != workspace.ExportFormatJupyter {
+		log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not exported as a Jupyter notebook. Its export format is %s.", name, path.Join(w.root, nameWithoutExt), stat.ReposExportFormat)
+		return nil, nil
+	}
+
+	// Modify the stat object path to include the extension. This stat object will be used
+	// to return the fs.FileInfo object in the stat method.
+	stat.Path = stat.Path + ext
+	return stat, nil
+}
+
+func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx context.Context, name string) (*workspaceFileStatus, error) {
+	stat, err := w.stat(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+
+	// We expect this internal function to only be called from [ReadDir] when we are sure
+	// that the object is a notebook. Thus, this should never happen.
+	if stat.ObjectType != workspace.ObjectTypeNotebook {
+		return nil, fmt.Errorf("expected object at %s to be a notebook but it is a %s", path.Join(w.root, name), stat.ObjectType)
+	}
+
+	// Get the extension for the notebook.
+	ext := notebook.GetExtensionByLanguage(stat.ObjectInfo)
+
+	// If the notebook was exported as a Jupyter notebook, the extension should be .ipynb.
+	if stat.Language == workspace.LanguagePython && stat.ReposExportFormat == workspace.ExportFormatJupyter {
+		ext = ".ipynb"
+	}
+
+	// Modify the stat object path to include the extension. This stat object will be used
+	// to return the fs.DirEntry object in the ReadDir method.
+	stat.Path = stat.Path + ext
+	return stat, nil
+}
+
+type DuplicatePathError struct {
+	oi1 workspace.ObjectInfo
+	oi2 workspace.ObjectInfo
+
+	commonName string
+}
+
+func (e DuplicatePathError) Error() string {
+	return fmt.Sprintf("failed to read files from the workspace file system. Duplicate paths encountered. Both %s at %s and %s at %s resolve to the same name %s. Changing the name of one of these objects will resolve this issue", e.oi1.ObjectType, e.oi1.Path, e.oi2.ObjectType, e.oi2.Path, e.commonName)
+}
+
+// This is a filer for the workspace file system that allows you to pretend the
+// workspace file system is a traditional file system. It allows you to list, read, write,
+// delete, and stat notebooks (and files in general) in the workspace, using their paths
+// with the extension included.
+//
+// The ReadDir method returns a DuplicatePathError if this traditional file system view is
+// not possible. For example, a Python notebook called `foo` and a Python file called `foo.py`
+// would resolve to the same path `foo.py` in a traditional file system.
+//
+// Users of this filer should be careful when using the Write and Mkdir methods.
+// The underlying import API we use to upload notebooks and files returns opaque internal
+// errors for namespace clashes (e.g. a file and a notebook or a directory and a notebook).
+// Thus users of these methods should be careful to avoid such clashes.
+func NewWorkspaceFilesExtensionsClient(w *databricks.WorkspaceClient, root string) (Filer, error) { + apiClient, err := client.New(w.Config) + if err != nil { + return nil, err + } + + filer, err := NewWorkspaceFilesClient(w, root) + if err != nil { + return nil, err + } + + return &workspaceFilesExtensionsClient{ + workspaceClient: w, + apiClient: apiClient, + + wsfs: filer, + root: root, + }, nil +} + +func (w *workspaceFilesExtensionsClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { + entries, err := w.wsfs.ReadDir(ctx, name) + if err != nil { + return nil, err + } + + seenPaths := make(map[string]workspace.ObjectInfo) + for i := range entries { + info, err := entries[i].Info() + if err != nil { + return nil, err + } + sysInfo := info.Sys().(workspace.ObjectInfo) + + // If the object is a notebook, include an extension in the entry. + if sysInfo.ObjectType == workspace.ObjectTypeNotebook { + stat, err := w.getNotebookStatByNameWithoutExt(ctx, entries[i].Name()) + if err != nil { + return nil, err + } + // Replace the entry with the new entry that includes the extension. + entries[i] = wsfsDirEntry{wsfsFileInfo{oi: *stat.ObjectInfo}} + } + + // Error if we have seen this path before in the current directory. + // If not seen before, add it to the seen paths. + if _, ok := seenPaths[entries[i].Name()]; ok { + return nil, DuplicatePathError{ + oi1: seenPaths[entries[i].Name()], + oi2: sysInfo, + commonName: path.Join(name, entries[i].Name()), + } + } + seenPaths[entries[i].Name()] = sysInfo + } + + return entries, nil +} + +// Note: The import API returns opaque internal errors for namespace clashes +// (e.g. a file and a notebook or a directory and a notebook). Thus users of this +// method should be careful to avoid such clashes. +func (w *workspaceFilesExtensionsClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error { + return w.wsfs.Write(ctx, name, reader, mode...) +} + +// Try to read the file as a regular file. If the file is not found, try to read it as a notebook. +func (w *workspaceFilesExtensionsClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { + r, err := w.wsfs.Read(ctx, name) + + // If the file is not found, it might be a notebook. + if errors.As(err, &FileDoesNotExistError{}) { + stat, serr := w.getNotebookStatByNameWithExt(ctx, name) + if serr != nil { + // Unable to stat. Return the stat error. + return nil, serr + } + if stat == nil { + // Not a notebook. Return the original error. + return nil, err + } + + // The workspace files filer performs an additional stat call to make sure + // the path is not a directory. We can skip this step since we already have + // the stat object and know that the path is a notebook. + return w.workspaceClient.Workspace.Download( + ctx, + path.Join(w.root, stat.nameForWorkspaceAPI), + workspace.DownloadFormat(stat.ReposExportFormat), + ) + } + return r, err +} + +// Try to delete the file as a regular file. If the file is not found, try to delete it as a notebook. +func (w *workspaceFilesExtensionsClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { + err := w.wsfs.Delete(ctx, name, mode...) + + // If the file is not found, it might be a notebook. + if errors.As(err, &FileDoesNotExistError{}) { + stat, serr := w.getNotebookStatByNameWithExt(ctx, name) + if serr != nil { + // Unable to stat. Return the stat error. + return serr + } + if stat == nil { + // Not a notebook. Return the original error. 
+ return err + } + + return w.wsfs.Delete(ctx, stat.nameForWorkspaceAPI, mode...) + } + + return err +} + +// Try to stat the file as a regular file. If the file is not found, try to stat it as a notebook. +func (w *workspaceFilesExtensionsClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + info, err := w.wsfs.Stat(ctx, name) + + // If the file is not found, it might be a notebook. + if errors.As(err, &FileDoesNotExistError{}) { + stat, serr := w.getNotebookStatByNameWithExt(ctx, name) + if serr != nil { + // Unable to stat. Return the stat error. + return nil, serr + } + if stat == nil { + // Not a notebook. Return the original error. + return nil, err + } + + return wsfsFileInfo{oi: *stat.ObjectInfo}, nil + } + + return info, err +} + +// Note: The import API returns opaque internal errors for namespace clashes +// (e.g. a file and a notebook or a directory and a notebook). Thus users of this +// method should be careful to avoid such clashes. +func (w *workspaceFilesExtensionsClient) Mkdir(ctx context.Context, name string) error { + return w.wsfs.Mkdir(ctx, name) +} From 364a609ea7339ab830c23cf9efc428d36f6c4ce3 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 31 May 2024 09:13:43 +0200 Subject: [PATCH 205/286] Upgrade TF provider to 1.46.0 (#1460) ## Changes Release notes in https://github.com/databricks/terraform-provider-databricks/releases/tag/v1.46.0 Notable changes since 1.43.0: * The job resource has been migrated to the Go SDK. More fields are now passed through from DABs into TF. * Improved zero-value handling. ## Tests n/a --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../internal/tf/schema/data_source_catalog.go | 46 +++ bundle/internal/tf/schema/data_source_job.go | 28 +- .../schema/data_source_mlflow_experiment.go | 19 + .../internal/tf/schema/data_source_table.go | 127 ++++++ bundle/internal/tf/schema/data_sources.go | 6 + ...omatic_cluster_update_workspace_setting.go | 39 ++ bundle/internal/tf/schema/resource_cluster.go | 6 - ...ance_security_profile_workspace_setting.go | 15 + ...d_security_monitoring_workspace_setting.go | 14 + bundle/internal/tf/schema/resource_job.go | 363 +++++++++++++----- .../tf/schema/resource_model_serving.go | 16 +- .../tf/schema/resource_quality_monitor.go | 76 ++++ .../internal/tf/schema/resource_sql_table.go | 1 + .../tf/schema/resource_vector_search_index.go | 11 +- bundle/internal/tf/schema/resources.go | 218 ++++++----- bundle/internal/tf/schema/root.go | 2 +- 17 files changed, 757 insertions(+), 232 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_catalog.go create mode 100644 bundle/internal/tf/schema/data_source_mlflow_experiment.go create mode 100644 bundle/internal/tf/schema/data_source_table.go create mode 100644 bundle/internal/tf/schema/resource_automatic_cluster_update_workspace_setting.go create mode 100644 bundle/internal/tf/schema/resource_compliance_security_profile_workspace_setting.go create mode 100644 bundle/internal/tf/schema/resource_enhanced_security_monitoring_workspace_setting.go create mode 100644 bundle/internal/tf/schema/resource_quality_monitor.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index cf98e16e8..f55b6c4f0 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.43.0" +const ProviderVersion = "1.46.0" diff --git a/bundle/internal/tf/schema/data_source_catalog.go 
b/bundle/internal/tf/schema/data_source_catalog.go new file mode 100644 index 000000000..6f9237cfa --- /dev/null +++ b/bundle/internal/tf/schema/data_source_catalog.go @@ -0,0 +1,46 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceCatalogCatalogInfoEffectivePredictiveOptimizationFlag struct { + InheritedFromName string `json:"inherited_from_name,omitempty"` + InheritedFromType string `json:"inherited_from_type,omitempty"` + Value string `json:"value"` +} + +type DataSourceCatalogCatalogInfoProvisioningInfo struct { + State string `json:"state,omitempty"` +} + +type DataSourceCatalogCatalogInfo struct { + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogType string `json:"catalog_type,omitempty"` + Comment string `json:"comment,omitempty"` + ConnectionName string `json:"connection_name,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"` + FullName string `json:"full_name,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Options map[string]string `json:"options,omitempty"` + Owner string `json:"owner,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + ProviderName string `json:"provider_name,omitempty"` + SecurableKind string `json:"securable_kind,omitempty"` + SecurableType string `json:"securable_type,omitempty"` + ShareName string `json:"share_name,omitempty"` + StorageLocation string `json:"storage_location,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + EffectivePredictiveOptimizationFlag *DataSourceCatalogCatalogInfoEffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"` + ProvisioningInfo *DataSourceCatalogCatalogInfoProvisioningInfo `json:"provisioning_info,omitempty"` +} + +type DataSourceCatalog struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + CatalogInfo *DataSourceCatalogCatalogInfo `json:"catalog_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index e5ec5afb7..d517bbe0f 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -55,9 +55,9 @@ type DataSourceJobJobSettingsSettingsGitSource struct { } type DataSourceJobJobSettingsSettingsHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type DataSourceJobJobSettingsSettingsHealth struct { @@ -222,7 +222,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewCluster struct { } type DataSourceJobJobSettingsSettingsJobCluster struct { - JobClusterKey string `json:"job_cluster_key,omitempty"` + JobClusterKey string `json:"job_cluster_key"` NewCluster *DataSourceJobJobSettingsSettingsJobClusterNewCluster `json:"new_cluster,omitempty"` } @@ -533,9 +533,9 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications struc } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + 
Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealth struct { @@ -805,7 +805,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQuery struct { type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTask struct { Parameters map[string]string `json:"parameters,omitempty"` - WarehouseId string `json:"warehouse_id,omitempty"` + WarehouseId string `json:"warehouse_id"` Alert *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"` Dashboard *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"` File *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"` @@ -844,7 +844,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTask struct { MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key,omitempty"` + TaskKey string `json:"task_key"` TimeoutSeconds int `json:"timeout_seconds,omitempty"` ConditionTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` @@ -872,9 +872,9 @@ type DataSourceJobJobSettingsSettingsTaskForEachTask struct { } type DataSourceJobJobSettingsSettingsTaskHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type DataSourceJobJobSettingsSettingsTaskHealth struct { @@ -1144,7 +1144,7 @@ type DataSourceJobJobSettingsSettingsTaskSqlTaskQuery struct { type DataSourceJobJobSettingsSettingsTaskSqlTask struct { Parameters map[string]string `json:"parameters,omitempty"` - WarehouseId string `json:"warehouse_id,omitempty"` + WarehouseId string `json:"warehouse_id"` Alert *DataSourceJobJobSettingsSettingsTaskSqlTaskAlert `json:"alert,omitempty"` Dashboard *DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard `json:"dashboard,omitempty"` File *DataSourceJobJobSettingsSettingsTaskSqlTaskFile `json:"file,omitempty"` @@ -1183,7 +1183,7 @@ type DataSourceJobJobSettingsSettingsTask struct { MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key,omitempty"` + TaskKey string `json:"task_key"` TimeoutSeconds int `json:"timeout_seconds,omitempty"` ConditionTask *DataSourceJobJobSettingsSettingsTaskConditionTask `json:"condition_task,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_mlflow_experiment.go b/bundle/internal/tf/schema/data_source_mlflow_experiment.go new file mode 100644 index 000000000..979130c5f --- /dev/null +++ b/bundle/internal/tf/schema/data_source_mlflow_experiment.go @@ -0,0 +1,19 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceMlflowExperimentTags struct { + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` +} + +type DataSourceMlflowExperiment struct { + ArtifactLocation string `json:"artifact_location,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + ExperimentId string `json:"experiment_id,omitempty"` + Id string `json:"id,omitempty"` + LastUpdateTime int `json:"last_update_time,omitempty"` + LifecycleStage string `json:"lifecycle_stage,omitempty"` + Name string `json:"name,omitempty"` + Tags []DataSourceMlflowExperimentTags `json:"tags,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_table.go b/bundle/internal/tf/schema/data_source_table.go new file mode 100644 index 000000000..f59959696 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_table.go @@ -0,0 +1,127 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceTableTableInfoColumnsMask struct { + FunctionName string `json:"function_name,omitempty"` + UsingColumnNames []string `json:"using_column_names,omitempty"` +} + +type DataSourceTableTableInfoColumns struct { + Comment string `json:"comment,omitempty"` + Name string `json:"name,omitempty"` + Nullable bool `json:"nullable,omitempty"` + PartitionIndex int `json:"partition_index,omitempty"` + Position int `json:"position,omitempty"` + TypeIntervalType string `json:"type_interval_type,omitempty"` + TypeJson string `json:"type_json,omitempty"` + TypeName string `json:"type_name,omitempty"` + TypePrecision int `json:"type_precision,omitempty"` + TypeScale int `json:"type_scale,omitempty"` + TypeText string `json:"type_text,omitempty"` + Mask *DataSourceTableTableInfoColumnsMask `json:"mask,omitempty"` +} + +type DataSourceTableTableInfoDeltaRuntimePropertiesKvpairs struct { + DeltaRuntimeProperties map[string]string `json:"delta_runtime_properties"` +} + +type DataSourceTableTableInfoEffectivePredictiveOptimizationFlag struct { + InheritedFromName string `json:"inherited_from_name,omitempty"` + InheritedFromType string `json:"inherited_from_type,omitempty"` + Value string `json:"value"` +} + +type DataSourceTableTableInfoEncryptionDetailsSseEncryptionDetails struct { + Algorithm string `json:"algorithm,omitempty"` + AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"` +} + +type DataSourceTableTableInfoEncryptionDetails struct { + SseEncryptionDetails *DataSourceTableTableInfoEncryptionDetailsSseEncryptionDetails `json:"sse_encryption_details,omitempty"` +} + +type DataSourceTableTableInfoRowFilter struct { + FunctionName string `json:"function_name"` + InputColumnNames []string `json:"input_column_names"` +} + +type DataSourceTableTableInfoTableConstraintsForeignKeyConstraint struct { + ChildColumns []string `json:"child_columns"` + Name string `json:"name"` + ParentColumns []string `json:"parent_columns"` + ParentTable string `json:"parent_table"` +} + +type DataSourceTableTableInfoTableConstraintsNamedTableConstraint struct { + Name string `json:"name"` +} + +type DataSourceTableTableInfoTableConstraintsPrimaryKeyConstraint struct { + ChildColumns []string `json:"child_columns"` + Name string `json:"name"` +} + +type DataSourceTableTableInfoTableConstraints struct { + ForeignKeyConstraint *DataSourceTableTableInfoTableConstraintsForeignKeyConstraint `json:"foreign_key_constraint,omitempty"` + NamedTableConstraint *DataSourceTableTableInfoTableConstraintsNamedTableConstraint `json:"named_table_constraint,omitempty"` + PrimaryKeyConstraint 
*DataSourceTableTableInfoTableConstraintsPrimaryKeyConstraint `json:"primary_key_constraint,omitempty"` +} + +type DataSourceTableTableInfoViewDependenciesDependenciesFunction struct { + FunctionFullName string `json:"function_full_name"` +} + +type DataSourceTableTableInfoViewDependenciesDependenciesTable struct { + TableFullName string `json:"table_full_name"` +} + +type DataSourceTableTableInfoViewDependenciesDependencies struct { + Function *DataSourceTableTableInfoViewDependenciesDependenciesFunction `json:"function,omitempty"` + Table *DataSourceTableTableInfoViewDependenciesDependenciesTable `json:"table,omitempty"` +} + +type DataSourceTableTableInfoViewDependencies struct { + Dependencies []DataSourceTableTableInfoViewDependenciesDependencies `json:"dependencies,omitempty"` +} + +type DataSourceTableTableInfo struct { + AccessPoint string `json:"access_point,omitempty"` + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogName string `json:"catalog_name,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DataAccessConfigurationId string `json:"data_access_configuration_id,omitempty"` + DataSourceFormat string `json:"data_source_format,omitempty"` + DeletedAt int `json:"deleted_at,omitempty"` + EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"` + FullName string `json:"full_name,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + PipelineId string `json:"pipeline_id,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + SqlPath string `json:"sql_path,omitempty"` + StorageCredentialName string `json:"storage_credential_name,omitempty"` + StorageLocation string `json:"storage_location,omitempty"` + TableId string `json:"table_id,omitempty"` + TableType string `json:"table_type,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + ViewDefinition string `json:"view_definition,omitempty"` + Columns []DataSourceTableTableInfoColumns `json:"columns,omitempty"` + DeltaRuntimePropertiesKvpairs *DataSourceTableTableInfoDeltaRuntimePropertiesKvpairs `json:"delta_runtime_properties_kvpairs,omitempty"` + EffectivePredictiveOptimizationFlag *DataSourceTableTableInfoEffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"` + EncryptionDetails *DataSourceTableTableInfoEncryptionDetails `json:"encryption_details,omitempty"` + RowFilter *DataSourceTableTableInfoRowFilter `json:"row_filter,omitempty"` + TableConstraints []DataSourceTableTableInfoTableConstraints `json:"table_constraints,omitempty"` + ViewDependencies *DataSourceTableTableInfoViewDependencies `json:"view_dependencies,omitempty"` +} + +type DataSourceTable struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + TableInfo *DataSourceTableTableInfo `json:"table_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 2e02c4388..c32483db0 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -7,6 +7,7 @@ type DataSources struct { AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` 
AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"` + Catalog map[string]any `json:"databricks_catalog,omitempty"` Catalogs map[string]any `json:"databricks_catalogs,omitempty"` Cluster map[string]any `json:"databricks_cluster,omitempty"` ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` @@ -26,6 +27,7 @@ type DataSources struct { Jobs map[string]any `json:"databricks_jobs,omitempty"` Metastore map[string]any `json:"databricks_metastore,omitempty"` Metastores map[string]any `json:"databricks_metastores,omitempty"` + MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` @@ -43,6 +45,7 @@ type DataSources struct { SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"` StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"` + Table map[string]any `json:"databricks_table,omitempty"` Tables map[string]any `json:"databricks_tables,omitempty"` User map[string]any `json:"databricks_user,omitempty"` Views map[string]any `json:"databricks_views,omitempty"` @@ -56,6 +59,7 @@ func NewDataSources() *DataSources { AwsBucketPolicy: make(map[string]any), AwsCrossaccountPolicy: make(map[string]any), AwsUnityCatalogPolicy: make(map[string]any), + Catalog: make(map[string]any), Catalogs: make(map[string]any), Cluster: make(map[string]any), ClusterPolicy: make(map[string]any), @@ -75,6 +79,7 @@ func NewDataSources() *DataSources { Jobs: make(map[string]any), Metastore: make(map[string]any), Metastores: make(map[string]any), + MlflowExperiment: make(map[string]any), MlflowModel: make(map[string]any), MwsCredentials: make(map[string]any), MwsWorkspaces: make(map[string]any), @@ -92,6 +97,7 @@ func NewDataSources() *DataSources { SqlWarehouses: make(map[string]any), StorageCredential: make(map[string]any), StorageCredentials: make(map[string]any), + Table: make(map[string]any), Tables: make(map[string]any), User: make(map[string]any), Views: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_automatic_cluster_update_workspace_setting.go b/bundle/internal/tf/schema/resource_automatic_cluster_update_workspace_setting.go new file mode 100644 index 000000000..e95639de8 --- /dev/null +++ b/bundle/internal/tf/schema/resource_automatic_cluster_update_workspace_setting.go @@ -0,0 +1,39 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails struct { + ForcedForComplianceMode bool `json:"forced_for_compliance_mode,omitempty"` + UnavailableForDisabledEntitlement bool `json:"unavailable_for_disabled_entitlement,omitempty"` + UnavailableForNonEnterpriseTier bool `json:"unavailable_for_non_enterprise_tier,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime struct { + Hours int `json:"hours,omitempty"` + Minutes int `json:"minutes,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule struct { + DayOfWeek string `json:"day_of_week,omitempty"` + Frequency string `json:"frequency,omitempty"` + WindowStartTime *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime `json:"window_start_time,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow struct { + WeekDayBasedSchedule *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule `json:"week_day_based_schedule,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace struct { + CanToggle bool `json:"can_toggle,omitempty"` + Enabled bool `json:"enabled,omitempty"` + RestartEvenIfNoUpdatesAvailable bool `json:"restart_even_if_no_updates_available,omitempty"` + EnablementDetails *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails `json:"enablement_details,omitempty"` + MaintenanceWindow *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow `json:"maintenance_window,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + AutomaticClusterUpdateWorkspace *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace `json:"automatic_cluster_update_workspace,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 046e0bb43..e4106d049 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -32,10 +32,6 @@ type ResourceClusterAzureAttributes struct { LogAnalyticsInfo *ResourceClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } -type ResourceClusterCloneFrom struct { - SourceClusterId string `json:"source_cluster_id"` -} - type ResourceClusterClusterLogConfDbfs struct { Destination string `json:"destination"` } @@ -169,7 +165,6 @@ type ResourceCluster struct { AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` - ClusterSource string `json:"cluster_source,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` DataSecurityMode string `json:"data_security_mode,omitempty"` DefaultTags map[string]string `json:"default_tags,omitempty"` @@ -195,7 +190,6 @@ type ResourceCluster struct { Autoscale *ResourceClusterAutoscale `json:"autoscale,omitempty"` AwsAttributes *ResourceClusterAwsAttributes `json:"aws_attributes,omitempty"` 
AzureAttributes *ResourceClusterAzureAttributes `json:"azure_attributes,omitempty"` - CloneFrom *ResourceClusterCloneFrom `json:"clone_from,omitempty"` ClusterLogConf *ResourceClusterClusterLogConf `json:"cluster_log_conf,omitempty"` ClusterMountInfo []ResourceClusterClusterMountInfo `json:"cluster_mount_info,omitempty"` DockerImage *ResourceClusterDockerImage `json:"docker_image,omitempty"` diff --git a/bundle/internal/tf/schema/resource_compliance_security_profile_workspace_setting.go b/bundle/internal/tf/schema/resource_compliance_security_profile_workspace_setting.go new file mode 100644 index 000000000..50815f753 --- /dev/null +++ b/bundle/internal/tf/schema/resource_compliance_security_profile_workspace_setting.go @@ -0,0 +1,15 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace struct { + ComplianceStandards []string `json:"compliance_standards,omitempty"` + IsEnabled bool `json:"is_enabled,omitempty"` +} + +type ResourceComplianceSecurityProfileWorkspaceSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + ComplianceSecurityProfileWorkspace *ResourceComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace `json:"compliance_security_profile_workspace,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_enhanced_security_monitoring_workspace_setting.go b/bundle/internal/tf/schema/resource_enhanced_security_monitoring_workspace_setting.go new file mode 100644 index 000000000..2f552402a --- /dev/null +++ b/bundle/internal/tf/schema/resource_enhanced_security_monitoring_workspace_setting.go @@ -0,0 +1,14 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceEnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace struct { + IsEnabled bool `json:"is_enabled,omitempty"` +} + +type ResourceEnhancedSecurityMonitoringWorkspaceSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + EnhancedSecurityMonitoringWorkspace *ResourceEnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace `json:"enhanced_security_monitoring_workspace,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 6958face8..0950073e2 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -39,6 +39,10 @@ type ResourceJobEnvironment struct { Spec *ResourceJobEnvironmentSpec `json:"spec,omitempty"` } +type ResourceJobGitSourceGitSnapshot struct { + UsedCommit string `json:"used_commit,omitempty"` +} + type ResourceJobGitSourceJobSource struct { DirtyState string `json:"dirty_state,omitempty"` ImportFromGitBranch string `json:"import_from_git_branch"` @@ -46,18 +50,19 @@ type ResourceJobGitSourceJobSource struct { } type ResourceJobGitSource struct { - Branch string `json:"branch,omitempty"` - Commit string `json:"commit,omitempty"` - Provider string `json:"provider,omitempty"` - Tag string `json:"tag,omitempty"` - Url string `json:"url"` - JobSource *ResourceJobGitSourceJobSource `json:"job_source,omitempty"` + Branch string `json:"branch,omitempty"` + Commit string `json:"commit,omitempty"` + Provider string `json:"provider,omitempty"` + Tag string `json:"tag,omitempty"` + Url string `json:"url"` + GitSnapshot *ResourceJobGitSourceGitSnapshot `json:"git_snapshot,omitempty"` + JobSource *ResourceJobGitSourceJobSource `json:"job_source,omitempty"` } type ResourceJobHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type ResourceJobHealth struct { @@ -72,7 +77,9 @@ type ResourceJobJobClusterNewClusterAutoscale struct { type ResourceJobJobClusterNewClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -80,10 +87,16 @@ type ResourceJobJobClusterNewClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceJobJobClusterNewClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceJobJobClusterNewClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo 
*ResourceJobJobClusterNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceJobJobClusterNewClusterClusterLogConfDbfs struct { @@ -179,6 +192,32 @@ type ResourceJobJobClusterNewClusterInitScripts struct { Workspace *ResourceJobJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } +type ResourceJobJobClusterNewClusterLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobJobClusterNewClusterLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobJobClusterNewClusterLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobJobClusterNewClusterLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobJobClusterNewClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobJobClusterNewClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobJobClusterNewClusterLibraryPypi `json:"pypi,omitempty"` +} + type ResourceJobJobClusterNewClusterWorkloadTypeClients struct { Jobs bool `json:"jobs,omitempty"` Notebooks bool `json:"notebooks,omitempty"` @@ -190,7 +229,6 @@ type ResourceJobJobClusterNewClusterWorkloadType struct { type ResourceJobJobClusterNewCluster struct { ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` - AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` @@ -218,11 +256,12 @@ type ResourceJobJobClusterNewCluster struct { DockerImage *ResourceJobJobClusterNewClusterDockerImage `json:"docker_image,omitempty"` GcpAttributes *ResourceJobJobClusterNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` InitScripts []ResourceJobJobClusterNewClusterInitScripts `json:"init_scripts,omitempty"` + Library []ResourceJobJobClusterNewClusterLibrary `json:"library,omitempty"` WorkloadType *ResourceJobJobClusterNewClusterWorkloadType `json:"workload_type,omitempty"` } type ResourceJobJobCluster struct { - JobClusterKey string `json:"job_cluster_key,omitempty"` + JobClusterKey string `json:"job_cluster_key"` NewCluster *ResourceJobJobClusterNewCluster `json:"new_cluster,omitempty"` } @@ -260,7 +299,9 @@ type ResourceJobNewClusterAutoscale struct { type ResourceJobNewClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -268,10 +309,16 @@ type ResourceJobNewClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceJobNewClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceJobNewClusterAzureAttributes struct { - Availability string 
`json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceJobNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceJobNewClusterClusterLogConfDbfs struct { @@ -367,6 +414,32 @@ type ResourceJobNewClusterInitScripts struct { Workspace *ResourceJobNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } +type ResourceJobNewClusterLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobNewClusterLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobNewClusterLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobNewClusterLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobNewClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobNewClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobNewClusterLibraryPypi `json:"pypi,omitempty"` +} + type ResourceJobNewClusterWorkloadTypeClients struct { Jobs bool `json:"jobs,omitempty"` Notebooks bool `json:"notebooks,omitempty"` @@ -378,7 +451,6 @@ type ResourceJobNewClusterWorkloadType struct { type ResourceJobNewCluster struct { ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` - AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` @@ -406,6 +478,7 @@ type ResourceJobNewCluster struct { DockerImage *ResourceJobNewClusterDockerImage `json:"docker_image,omitempty"` GcpAttributes *ResourceJobNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` InitScripts []ResourceJobNewClusterInitScripts `json:"init_scripts,omitempty"` + Library []ResourceJobNewClusterLibrary `json:"library,omitempty"` WorkloadType *ResourceJobNewClusterWorkloadType `json:"workload_type,omitempty"` } @@ -533,9 +606,9 @@ type ResourceJobTaskForEachTaskTaskEmailNotifications struct { } type ResourceJobTaskForEachTaskTaskHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type ResourceJobTaskForEachTaskTaskHealth struct { @@ -576,7 +649,9 @@ type ResourceJobTaskForEachTaskTaskNewClusterAutoscale struct { type ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -584,10 +659,16 @@ type ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes 
struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceJobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfDbfs struct { @@ -683,6 +764,32 @@ type ResourceJobTaskForEachTaskTaskNewClusterInitScripts struct { Workspace *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } +type ResourceJobTaskForEachTaskTaskNewClusterLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskForEachTaskTaskNewClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskForEachTaskTaskNewClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskForEachTaskTaskNewClusterLibraryPypi `json:"pypi,omitempty"` +} + type ResourceJobTaskForEachTaskTaskNewClusterWorkloadTypeClients struct { Jobs bool `json:"jobs,omitempty"` Notebooks bool `json:"notebooks,omitempty"` @@ -694,7 +801,6 @@ type ResourceJobTaskForEachTaskTaskNewClusterWorkloadType struct { type ResourceJobTaskForEachTaskTaskNewCluster struct { ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` - AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` @@ -706,7 +812,7 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct { IdempotencyToken string `json:"idempotency_token,omitempty"` InstancePoolId string `json:"instance_pool_id,omitempty"` NodeTypeId string `json:"node_type_id,omitempty"` - NumWorkers int `json:"num_workers"` + NumWorkers int `json:"num_workers,omitempty"` PolicyId string `json:"policy_id,omitempty"` RuntimeEngine string `json:"runtime_engine,omitempty"` SingleUserName string `json:"single_user_name,omitempty"` @@ -722,6 +828,7 @@ type ResourceJobTaskForEachTaskTaskNewCluster struct { DockerImage *ResourceJobTaskForEachTaskTaskNewClusterDockerImage `json:"docker_image,omitempty"` GcpAttributes *ResourceJobTaskForEachTaskTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` InitScripts []ResourceJobTaskForEachTaskTaskNewClusterInitScripts `json:"init_scripts,omitempty"` + Library 
[]ResourceJobTaskForEachTaskTaskNewClusterLibrary `json:"library,omitempty"` WorkloadType *ResourceJobTaskForEachTaskTaskNewClusterWorkloadType `json:"workload_type,omitempty"` } @@ -750,9 +857,21 @@ type ResourceJobTaskForEachTaskTaskPythonWheelTask struct { Parameters []string `json:"parameters,omitempty"` } +type ResourceJobTaskForEachTaskTaskRunJobTaskPipelineParams struct { + FullRefresh bool `json:"full_refresh,omitempty"` +} + type ResourceJobTaskForEachTaskTaskRunJobTask struct { - JobId int `json:"job_id"` - JobParameters map[string]string `json:"job_parameters,omitempty"` + DbtCommands []string `json:"dbt_commands,omitempty"` + JarParams []string `json:"jar_params,omitempty"` + JobId int `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` + NotebookParams map[string]string `json:"notebook_params,omitempty"` + PythonNamedParams map[string]string `json:"python_named_params,omitempty"` + PythonParams []string `json:"python_params,omitempty"` + SparkSubmitParams []string `json:"spark_submit_params,omitempty"` + SqlParams map[string]string `json:"sql_params,omitempty"` + PipelineParams *ResourceJobTaskForEachTaskTaskRunJobTaskPipelineParams `json:"pipeline_params,omitempty"` } type ResourceJobTaskForEachTaskTaskSparkJarTask struct { @@ -805,7 +924,7 @@ type ResourceJobTaskForEachTaskTaskSqlTaskQuery struct { type ResourceJobTaskForEachTaskTaskSqlTask struct { Parameters map[string]string `json:"parameters,omitempty"` - WarehouseId string `json:"warehouse_id,omitempty"` + WarehouseId string `json:"warehouse_id"` Alert *ResourceJobTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"` Dashboard *ResourceJobTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"` File *ResourceJobTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"` @@ -836,33 +955,34 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { } type ResourceJobTaskForEachTaskTask struct { - Description string `json:"description,omitempty"` - EnvironmentKey string `json:"environment_key,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key,omitempty"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` - Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask 
*ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTaskForEachTask struct { @@ -872,9 +992,9 @@ type ResourceJobTaskForEachTask struct { } type ResourceJobTaskHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type ResourceJobTaskHealth struct { @@ -915,7 +1035,9 @@ type ResourceJobTaskNewClusterAutoscale struct { type ResourceJobTaskNewClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` 
EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -923,10 +1045,16 @@ type ResourceJobTaskNewClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceJobTaskNewClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceJobTaskNewClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceJobTaskNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceJobTaskNewClusterClusterLogConfDbfs struct { @@ -1022,6 +1150,32 @@ type ResourceJobTaskNewClusterInitScripts struct { Workspace *ResourceJobTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } +type ResourceJobTaskNewClusterLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskNewClusterLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskNewClusterLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskNewClusterLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskNewClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskNewClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskNewClusterLibraryPypi `json:"pypi,omitempty"` +} + type ResourceJobTaskNewClusterWorkloadTypeClients struct { Jobs bool `json:"jobs,omitempty"` Notebooks bool `json:"notebooks,omitempty"` @@ -1033,7 +1187,6 @@ type ResourceJobTaskNewClusterWorkloadType struct { type ResourceJobTaskNewCluster struct { ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` - AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` @@ -1061,6 +1214,7 @@ type ResourceJobTaskNewCluster struct { DockerImage *ResourceJobTaskNewClusterDockerImage `json:"docker_image,omitempty"` GcpAttributes *ResourceJobTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` InitScripts []ResourceJobTaskNewClusterInitScripts `json:"init_scripts,omitempty"` + Library []ResourceJobTaskNewClusterLibrary `json:"library,omitempty"` WorkloadType *ResourceJobTaskNewClusterWorkloadType `json:"workload_type,omitempty"` } @@ -1089,9 +1243,21 @@ type ResourceJobTaskPythonWheelTask struct { Parameters []string `json:"parameters,omitempty"` } +type ResourceJobTaskRunJobTaskPipelineParams struct { + FullRefresh bool `json:"full_refresh,omitempty"` +} + type ResourceJobTaskRunJobTask struct { - JobId int `json:"job_id"` - JobParameters map[string]string `json:"job_parameters,omitempty"` + DbtCommands []string 
`json:"dbt_commands,omitempty"` + JarParams []string `json:"jar_params,omitempty"` + JobId int `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` + NotebookParams map[string]string `json:"notebook_params,omitempty"` + PythonNamedParams map[string]string `json:"python_named_params,omitempty"` + PythonParams []string `json:"python_params,omitempty"` + SparkSubmitParams []string `json:"spark_submit_params,omitempty"` + SqlParams map[string]string `json:"sql_params,omitempty"` + PipelineParams *ResourceJobTaskRunJobTaskPipelineParams `json:"pipeline_params,omitempty"` } type ResourceJobTaskSparkJarTask struct { @@ -1144,7 +1310,7 @@ type ResourceJobTaskSqlTaskQuery struct { type ResourceJobTaskSqlTask struct { Parameters map[string]string `json:"parameters,omitempty"` - WarehouseId string `json:"warehouse_id,omitempty"` + WarehouseId string `json:"warehouse_id"` Alert *ResourceJobTaskSqlTaskAlert `json:"alert,omitempty"` Dashboard *ResourceJobTaskSqlTaskDashboard `json:"dashboard,omitempty"` File *ResourceJobTaskSqlTaskFile `json:"file,omitempty"` @@ -1175,34 +1341,35 @@ type ResourceJobTaskWebhookNotifications struct { } type ResourceJobTask struct { - Description string `json:"description,omitempty"` - EnvironmentKey string `json:"environment_key,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int `json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key,omitempty"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` - ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` - Health *ResourceJobTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int 
`json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` + ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` + Health *ResourceJobTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTriggerFileArrival struct { @@ -1211,6 +1378,13 @@ type ResourceJobTriggerFileArrival struct { WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` } +type ResourceJobTriggerTable struct { + Condition string `json:"condition,omitempty"` + MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` + TableNames []string `json:"table_names,omitempty"` + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` +} + type ResourceJobTriggerTableUpdate struct { Condition string `json:"condition,omitempty"` MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` @@ -1221,6 +1395,7 @@ type ResourceJobTriggerTableUpdate struct { type ResourceJobTrigger struct { PauseStatus string `json:"pause_status,omitempty"` FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"` + Table *ResourceJobTriggerTable `json:"table,omitempty"` TableUpdate *ResourceJobTriggerTableUpdate `json:"table_update,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_model_serving.go b/bundle/internal/tf/schema/resource_model_serving.go index a74a544ed..f5ffbbe5e 100644 --- a/bundle/internal/tf/schema/resource_model_serving.go +++ b/bundle/internal/tf/schema/resource_model_serving.go @@ -34,12 +34,15 @@ type ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServing } type ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig struct { - OpenaiApiBase string `json:"openai_api_base,omitempty"` - OpenaiApiKey string `json:"openai_api_key"` - OpenaiApiType string `json:"openai_api_type,omitempty"` - OpenaiApiVersion string `json:"openai_api_version,omitempty"` - OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"` - OpenaiOrganization string `json:"openai_organization,omitempty"` + MicrosoftEntraClientId string 
`json:"microsoft_entra_client_id,omitempty"` + MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"` + MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"` + OpenaiApiBase string `json:"openai_api_base,omitempty"` + OpenaiApiKey string `json:"openai_api_key,omitempty"` + OpenaiApiType string `json:"openai_api_type,omitempty"` + OpenaiApiVersion string `json:"openai_api_version,omitempty"` + OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"` + OpenaiOrganization string `json:"openai_organization,omitempty"` } type ResourceModelServingConfigServedEntitiesExternalModelPalmConfig struct { @@ -114,6 +117,7 @@ type ResourceModelServingTags struct { type ResourceModelServing struct { Id string `json:"id,omitempty"` Name string `json:"name"` + RouteOptimized bool `json:"route_optimized,omitempty"` ServingEndpointId string `json:"serving_endpoint_id,omitempty"` Config *ResourceModelServingConfig `json:"config,omitempty"` RateLimits []ResourceModelServingRateLimits `json:"rate_limits,omitempty"` diff --git a/bundle/internal/tf/schema/resource_quality_monitor.go b/bundle/internal/tf/schema/resource_quality_monitor.go new file mode 100644 index 000000000..0fc2abd66 --- /dev/null +++ b/bundle/internal/tf/schema/resource_quality_monitor.go @@ -0,0 +1,76 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceQualityMonitorCustomMetrics struct { + Definition string `json:"definition"` + InputColumns []string `json:"input_columns"` + Name string `json:"name"` + OutputDataType string `json:"output_data_type"` + Type string `json:"type"` +} + +type ResourceQualityMonitorDataClassificationConfig struct { + Enabled bool `json:"enabled,omitempty"` +} + +type ResourceQualityMonitorInferenceLog struct { + Granularities []string `json:"granularities"` + LabelCol string `json:"label_col,omitempty"` + ModelIdCol string `json:"model_id_col"` + PredictionCol string `json:"prediction_col"` + PredictionProbaCol string `json:"prediction_proba_col,omitempty"` + ProblemType string `json:"problem_type"` + TimestampCol string `json:"timestamp_col"` +} + +type ResourceQualityMonitorNotificationsOnFailure struct { + EmailAddresses []string `json:"email_addresses,omitempty"` +} + +type ResourceQualityMonitorNotificationsOnNewClassificationTagDetected struct { + EmailAddresses []string `json:"email_addresses,omitempty"` +} + +type ResourceQualityMonitorNotifications struct { + OnFailure *ResourceQualityMonitorNotificationsOnFailure `json:"on_failure,omitempty"` + OnNewClassificationTagDetected *ResourceQualityMonitorNotificationsOnNewClassificationTagDetected `json:"on_new_classification_tag_detected,omitempty"` +} + +type ResourceQualityMonitorSchedule struct { + PauseStatus string `json:"pause_status,omitempty"` + QuartzCronExpression string `json:"quartz_cron_expression"` + TimezoneId string `json:"timezone_id"` +} + +type ResourceQualityMonitorSnapshot struct { +} + +type ResourceQualityMonitorTimeSeries struct { + Granularities []string `json:"granularities"` + TimestampCol string `json:"timestamp_col"` +} + +type ResourceQualityMonitor struct { + AssetsDir string `json:"assets_dir"` + BaselineTableName string `json:"baseline_table_name,omitempty"` + DashboardId string `json:"dashboard_id,omitempty"` + DriftMetricsTableName string `json:"drift_metrics_table_name,omitempty"` + Id string `json:"id,omitempty"` + LatestMonitorFailureMsg string `json:"latest_monitor_failure_msg,omitempty"` + MonitorVersion 
string `json:"monitor_version,omitempty"` + OutputSchemaName string `json:"output_schema_name"` + ProfileMetricsTableName string `json:"profile_metrics_table_name,omitempty"` + SkipBuiltinDashboard bool `json:"skip_builtin_dashboard,omitempty"` + SlicingExprs []string `json:"slicing_exprs,omitempty"` + Status string `json:"status,omitempty"` + TableName string `json:"table_name"` + WarehouseId string `json:"warehouse_id,omitempty"` + CustomMetrics []ResourceQualityMonitorCustomMetrics `json:"custom_metrics,omitempty"` + DataClassificationConfig *ResourceQualityMonitorDataClassificationConfig `json:"data_classification_config,omitempty"` + InferenceLog *ResourceQualityMonitorInferenceLog `json:"inference_log,omitempty"` + Notifications *ResourceQualityMonitorNotifications `json:"notifications,omitempty"` + Schedule *ResourceQualityMonitorSchedule `json:"schedule,omitempty"` + Snapshot *ResourceQualityMonitorSnapshot `json:"snapshot,omitempty"` + TimeSeries *ResourceQualityMonitorTimeSeries `json:"time_series,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_sql_table.go b/bundle/internal/tf/schema/resource_sql_table.go index 97a8977bc..51fb3bc0d 100644 --- a/bundle/internal/tf/schema/resource_sql_table.go +++ b/bundle/internal/tf/schema/resource_sql_table.go @@ -18,6 +18,7 @@ type ResourceSqlTable struct { Id string `json:"id,omitempty"` Name string `json:"name"` Options map[string]string `json:"options,omitempty"` + Owner string `json:"owner,omitempty"` Partitions []string `json:"partitions,omitempty"` Properties map[string]string `json:"properties,omitempty"` SchemaName string `json:"schema_name"` diff --git a/bundle/internal/tf/schema/resource_vector_search_index.go b/bundle/internal/tf/schema/resource_vector_search_index.go index 06f666656..2ce51576d 100644 --- a/bundle/internal/tf/schema/resource_vector_search_index.go +++ b/bundle/internal/tf/schema/resource_vector_search_index.go @@ -13,11 +13,12 @@ type ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingVectorColumns struct { } type ResourceVectorSearchIndexDeltaSyncIndexSpec struct { - PipelineId string `json:"pipeline_id,omitempty"` - PipelineType string `json:"pipeline_type,omitempty"` - SourceTable string `json:"source_table,omitempty"` - EmbeddingSourceColumns []ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingSourceColumns `json:"embedding_source_columns,omitempty"` - EmbeddingVectorColumns []ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingVectorColumns `json:"embedding_vector_columns,omitempty"` + EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"` + PipelineId string `json:"pipeline_id,omitempty"` + PipelineType string `json:"pipeline_type,omitempty"` + SourceTable string `json:"source_table,omitempty"` + EmbeddingSourceColumns []ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingSourceColumns `json:"embedding_source_columns,omitempty"` + EmbeddingVectorColumns []ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingVectorColumns `json:"embedding_vector_columns,omitempty"` } type ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingSourceColumns struct { diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index e5eacc867..79d71a65f 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -3,115 +3,122 @@ package schema type Resources struct { - AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` - ArtifactAllowlist map[string]any 
`json:"databricks_artifact_allowlist,omitempty"` - AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` - AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"` - AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"` - AzureBlobMount map[string]any `json:"databricks_azure_blob_mount,omitempty"` - Catalog map[string]any `json:"databricks_catalog,omitempty"` - CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"` - Cluster map[string]any `json:"databricks_cluster,omitempty"` - ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` - Connection map[string]any `json:"databricks_connection,omitempty"` - DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` - DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"` - Directory map[string]any `json:"databricks_directory,omitempty"` - Entitlements map[string]any `json:"databricks_entitlements,omitempty"` - ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` - File map[string]any `json:"databricks_file,omitempty"` - GitCredential map[string]any `json:"databricks_git_credential,omitempty"` - GlobalInitScript map[string]any `json:"databricks_global_init_script,omitempty"` - Grant map[string]any `json:"databricks_grant,omitempty"` - Grants map[string]any `json:"databricks_grants,omitempty"` - Group map[string]any `json:"databricks_group,omitempty"` - GroupInstanceProfile map[string]any `json:"databricks_group_instance_profile,omitempty"` - GroupMember map[string]any `json:"databricks_group_member,omitempty"` - GroupRole map[string]any `json:"databricks_group_role,omitempty"` - InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` - InstanceProfile map[string]any `json:"databricks_instance_profile,omitempty"` - IpAccessList map[string]any `json:"databricks_ip_access_list,omitempty"` - Job map[string]any `json:"databricks_job,omitempty"` - LakehouseMonitor map[string]any `json:"databricks_lakehouse_monitor,omitempty"` - Library map[string]any `json:"databricks_library,omitempty"` - Metastore map[string]any `json:"databricks_metastore,omitempty"` - MetastoreAssignment map[string]any `json:"databricks_metastore_assignment,omitempty"` - MetastoreDataAccess map[string]any `json:"databricks_metastore_data_access,omitempty"` - MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` - MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` - MlflowWebhook map[string]any `json:"databricks_mlflow_webhook,omitempty"` - ModelServing map[string]any `json:"databricks_model_serving,omitempty"` - Mount map[string]any `json:"databricks_mount,omitempty"` - MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` - MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"` - MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"` - MwsNccBinding map[string]any `json:"databricks_mws_ncc_binding,omitempty"` - MwsNccPrivateEndpointRule map[string]any `json:"databricks_mws_ncc_private_endpoint_rule,omitempty"` - MwsNetworkConnectivityConfig map[string]any `json:"databricks_mws_network_connectivity_config,omitempty"` - MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"` - MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"` - MwsPrivateAccessSettings map[string]any 
`json:"databricks_mws_private_access_settings,omitempty"` - MwsStorageConfigurations map[string]any `json:"databricks_mws_storage_configurations,omitempty"` - MwsVpcEndpoint map[string]any `json:"databricks_mws_vpc_endpoint,omitempty"` - MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` - Notebook map[string]any `json:"databricks_notebook,omitempty"` - OboToken map[string]any `json:"databricks_obo_token,omitempty"` - OnlineTable map[string]any `json:"databricks_online_table,omitempty"` - PermissionAssignment map[string]any `json:"databricks_permission_assignment,omitempty"` - Permissions map[string]any `json:"databricks_permissions,omitempty"` - Pipeline map[string]any `json:"databricks_pipeline,omitempty"` - Provider map[string]any `json:"databricks_provider,omitempty"` - Recipient map[string]any `json:"databricks_recipient,omitempty"` - RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` - Repo map[string]any `json:"databricks_repo,omitempty"` - RestrictWorkspaceAdminsSetting map[string]any `json:"databricks_restrict_workspace_admins_setting,omitempty"` - Schema map[string]any `json:"databricks_schema,omitempty"` - Secret map[string]any `json:"databricks_secret,omitempty"` - SecretAcl map[string]any `json:"databricks_secret_acl,omitempty"` - SecretScope map[string]any `json:"databricks_secret_scope,omitempty"` - ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` - ServicePrincipalRole map[string]any `json:"databricks_service_principal_role,omitempty"` - ServicePrincipalSecret map[string]any `json:"databricks_service_principal_secret,omitempty"` - Share map[string]any `json:"databricks_share,omitempty"` - SqlAlert map[string]any `json:"databricks_sql_alert,omitempty"` - SqlDashboard map[string]any `json:"databricks_sql_dashboard,omitempty"` - SqlEndpoint map[string]any `json:"databricks_sql_endpoint,omitempty"` - SqlGlobalConfig map[string]any `json:"databricks_sql_global_config,omitempty"` - SqlPermissions map[string]any `json:"databricks_sql_permissions,omitempty"` - SqlQuery map[string]any `json:"databricks_sql_query,omitempty"` - SqlTable map[string]any `json:"databricks_sql_table,omitempty"` - SqlVisualization map[string]any `json:"databricks_sql_visualization,omitempty"` - SqlWidget map[string]any `json:"databricks_sql_widget,omitempty"` - StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` - SystemSchema map[string]any `json:"databricks_system_schema,omitempty"` - Table map[string]any `json:"databricks_table,omitempty"` - Token map[string]any `json:"databricks_token,omitempty"` - User map[string]any `json:"databricks_user,omitempty"` - UserInstanceProfile map[string]any `json:"databricks_user_instance_profile,omitempty"` - UserRole map[string]any `json:"databricks_user_role,omitempty"` - VectorSearchEndpoint map[string]any `json:"databricks_vector_search_endpoint,omitempty"` - VectorSearchIndex map[string]any `json:"databricks_vector_search_index,omitempty"` - Volume map[string]any `json:"databricks_volume,omitempty"` - WorkspaceConf map[string]any `json:"databricks_workspace_conf,omitempty"` - WorkspaceFile map[string]any `json:"databricks_workspace_file,omitempty"` + AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` + ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` + AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"` + 
AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` + AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"` + AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"` + AzureBlobMount map[string]any `json:"databricks_azure_blob_mount,omitempty"` + Catalog map[string]any `json:"databricks_catalog,omitempty"` + CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"` + Cluster map[string]any `json:"databricks_cluster,omitempty"` + ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` + ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"` + Connection map[string]any `json:"databricks_connection,omitempty"` + DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` + DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"` + Directory map[string]any `json:"databricks_directory,omitempty"` + EnhancedSecurityMonitoringWorkspaceSetting map[string]any `json:"databricks_enhanced_security_monitoring_workspace_setting,omitempty"` + Entitlements map[string]any `json:"databricks_entitlements,omitempty"` + ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + File map[string]any `json:"databricks_file,omitempty"` + GitCredential map[string]any `json:"databricks_git_credential,omitempty"` + GlobalInitScript map[string]any `json:"databricks_global_init_script,omitempty"` + Grant map[string]any `json:"databricks_grant,omitempty"` + Grants map[string]any `json:"databricks_grants,omitempty"` + Group map[string]any `json:"databricks_group,omitempty"` + GroupInstanceProfile map[string]any `json:"databricks_group_instance_profile,omitempty"` + GroupMember map[string]any `json:"databricks_group_member,omitempty"` + GroupRole map[string]any `json:"databricks_group_role,omitempty"` + InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` + InstanceProfile map[string]any `json:"databricks_instance_profile,omitempty"` + IpAccessList map[string]any `json:"databricks_ip_access_list,omitempty"` + Job map[string]any `json:"databricks_job,omitempty"` + LakehouseMonitor map[string]any `json:"databricks_lakehouse_monitor,omitempty"` + Library map[string]any `json:"databricks_library,omitempty"` + Metastore map[string]any `json:"databricks_metastore,omitempty"` + MetastoreAssignment map[string]any `json:"databricks_metastore_assignment,omitempty"` + MetastoreDataAccess map[string]any `json:"databricks_metastore_data_access,omitempty"` + MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` + MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` + MlflowWebhook map[string]any `json:"databricks_mlflow_webhook,omitempty"` + ModelServing map[string]any `json:"databricks_model_serving,omitempty"` + Mount map[string]any `json:"databricks_mount,omitempty"` + MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"` + MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"` + MwsNccBinding map[string]any `json:"databricks_mws_ncc_binding,omitempty"` + MwsNccPrivateEndpointRule map[string]any `json:"databricks_mws_ncc_private_endpoint_rule,omitempty"` + MwsNetworkConnectivityConfig map[string]any `json:"databricks_mws_network_connectivity_config,omitempty"` + 
MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"` + MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"` + MwsPrivateAccessSettings map[string]any `json:"databricks_mws_private_access_settings,omitempty"` + MwsStorageConfigurations map[string]any `json:"databricks_mws_storage_configurations,omitempty"` + MwsVpcEndpoint map[string]any `json:"databricks_mws_vpc_endpoint,omitempty"` + MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` + Notebook map[string]any `json:"databricks_notebook,omitempty"` + OboToken map[string]any `json:"databricks_obo_token,omitempty"` + OnlineTable map[string]any `json:"databricks_online_table,omitempty"` + PermissionAssignment map[string]any `json:"databricks_permission_assignment,omitempty"` + Permissions map[string]any `json:"databricks_permissions,omitempty"` + Pipeline map[string]any `json:"databricks_pipeline,omitempty"` + Provider map[string]any `json:"databricks_provider,omitempty"` + QualityMonitor map[string]any `json:"databricks_quality_monitor,omitempty"` + Recipient map[string]any `json:"databricks_recipient,omitempty"` + RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` + Repo map[string]any `json:"databricks_repo,omitempty"` + RestrictWorkspaceAdminsSetting map[string]any `json:"databricks_restrict_workspace_admins_setting,omitempty"` + Schema map[string]any `json:"databricks_schema,omitempty"` + Secret map[string]any `json:"databricks_secret,omitempty"` + SecretAcl map[string]any `json:"databricks_secret_acl,omitempty"` + SecretScope map[string]any `json:"databricks_secret_scope,omitempty"` + ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` + ServicePrincipalRole map[string]any `json:"databricks_service_principal_role,omitempty"` + ServicePrincipalSecret map[string]any `json:"databricks_service_principal_secret,omitempty"` + Share map[string]any `json:"databricks_share,omitempty"` + SqlAlert map[string]any `json:"databricks_sql_alert,omitempty"` + SqlDashboard map[string]any `json:"databricks_sql_dashboard,omitempty"` + SqlEndpoint map[string]any `json:"databricks_sql_endpoint,omitempty"` + SqlGlobalConfig map[string]any `json:"databricks_sql_global_config,omitempty"` + SqlPermissions map[string]any `json:"databricks_sql_permissions,omitempty"` + SqlQuery map[string]any `json:"databricks_sql_query,omitempty"` + SqlTable map[string]any `json:"databricks_sql_table,omitempty"` + SqlVisualization map[string]any `json:"databricks_sql_visualization,omitempty"` + SqlWidget map[string]any `json:"databricks_sql_widget,omitempty"` + StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` + SystemSchema map[string]any `json:"databricks_system_schema,omitempty"` + Table map[string]any `json:"databricks_table,omitempty"` + Token map[string]any `json:"databricks_token,omitempty"` + User map[string]any `json:"databricks_user,omitempty"` + UserInstanceProfile map[string]any `json:"databricks_user_instance_profile,omitempty"` + UserRole map[string]any `json:"databricks_user_role,omitempty"` + VectorSearchEndpoint map[string]any `json:"databricks_vector_search_endpoint,omitempty"` + VectorSearchIndex map[string]any `json:"databricks_vector_search_index,omitempty"` + Volume map[string]any `json:"databricks_volume,omitempty"` + WorkspaceConf map[string]any `json:"databricks_workspace_conf,omitempty"` + WorkspaceFile map[string]any `json:"databricks_workspace_file,omitempty"` } func NewResources() 
*Resources { return &Resources{ - AccessControlRuleSet: make(map[string]any), - ArtifactAllowlist: make(map[string]any), - AwsS3Mount: make(map[string]any), - AzureAdlsGen1Mount: make(map[string]any), - AzureAdlsGen2Mount: make(map[string]any), - AzureBlobMount: make(map[string]any), - Catalog: make(map[string]any), - CatalogWorkspaceBinding: make(map[string]any), - Cluster: make(map[string]any), - ClusterPolicy: make(map[string]any), - Connection: make(map[string]any), - DbfsFile: make(map[string]any), - DefaultNamespaceSetting: make(map[string]any), - Directory: make(map[string]any), + AccessControlRuleSet: make(map[string]any), + ArtifactAllowlist: make(map[string]any), + AutomaticClusterUpdateWorkspaceSetting: make(map[string]any), + AwsS3Mount: make(map[string]any), + AzureAdlsGen1Mount: make(map[string]any), + AzureAdlsGen2Mount: make(map[string]any), + AzureBlobMount: make(map[string]any), + Catalog: make(map[string]any), + CatalogWorkspaceBinding: make(map[string]any), + Cluster: make(map[string]any), + ClusterPolicy: make(map[string]any), + ComplianceSecurityProfileWorkspaceSetting: make(map[string]any), + Connection: make(map[string]any), + DbfsFile: make(map[string]any), + DefaultNamespaceSetting: make(map[string]any), + Directory: make(map[string]any), + EnhancedSecurityMonitoringWorkspaceSetting: make(map[string]any), Entitlements: make(map[string]any), ExternalLocation: make(map[string]any), File: make(map[string]any), @@ -156,6 +163,7 @@ func NewResources() *Resources { Permissions: make(map[string]any), Pipeline: make(map[string]any), Provider: make(map[string]any), + QualityMonitor: make(map[string]any), Recipient: make(map[string]any), RegisteredModel: make(map[string]any), Repo: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index b1fed9424..e4ca67740 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.43.0" +const ProviderVersion = "1.46.0" func NewRoot() *Root { return &Root{ From a33d0c8bf9f19fb594d33c478adb5f925076dfe6 Mon Sep 17 00:00:00 2001 From: Aravind Segu Date: Fri, 31 May 2024 02:42:25 -0700 Subject: [PATCH 206/286] Add support for Lakehouse monitoring in bundles (#1307) ## Changes This change adds support for Lakehouse monitoring in bundles. The associated resource type name is "quality monitor". ## Testing Unit tests. 
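For context, a bundle could declare such a monitor under the new `quality_monitors` resource key. The following is a minimal, illustrative sketch only; the field names mirror `catalog.CreateMonitor` (see `bundle/config/resources/quality_monitor.go` and the test fixture added in this change), and the table, schema, and directory values are placeholders:

```yaml
# Illustrative only; replace the table, schema, and assets directory with real values.
resources:
  quality_monitors:
    my_monitor:
      table_name: main.default.my_table           # monitored table; its full name also serves as the resource ID
      assets_dir: /Shared/quality_monitor_assets  # workspace directory for generated monitor assets
      output_schema_name: main.default            # schema that receives the generated metrics tables
      snapshot: {}                                 # snapshot profile; inference_log or time_series are alternatives
```

As noted in the resource definition, the monitor ID is the full table name (`catalog_name.schema_name.table_name`), so other resources can reference it through the usual interpolation syntax.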
--------- Co-authored-by: Pieter Noordhuis Co-authored-by: Pieter Noordhuis Co-authored-by: Arpit Jasapara <87999496+arpitjasa-db@users.noreply.github.com> --- .../mutator/process_target_mode_test.go | 8 +++ bundle/config/mutator/run_as.go | 10 ++++ bundle/config/mutator/run_as_test.go | 1 + bundle/config/resources.go | 20 +++++++ bundle/config/resources/quality_monitor.go | 60 +++++++++++++++++++ bundle/deploy/terraform/convert.go | 22 +++++++ bundle/deploy/terraform/convert_test.go | 55 +++++++++++++++++ bundle/deploy/terraform/interpolate.go | 2 + .../tfdyn/convert_quality_monitor.go | 37 ++++++++++++ .../tfdyn/convert_quality_monitor_test.go | 46 ++++++++++++++ bundle/tests/quality_monitor/databricks.yml | 40 +++++++++++++ bundle/tests/quality_monitor_test.go | 59 ++++++++++++++++++ libs/dyn/convert/struct_info.go | 9 +++ libs/textutil/case.go | 14 +++++ libs/textutil/case_test.go | 40 +++++++++++++ 15 files changed, 423 insertions(+) create mode 100644 bundle/config/resources/quality_monitor.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_quality_monitor.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go create mode 100644 bundle/tests/quality_monitor/databricks.yml create mode 100644 bundle/tests/quality_monitor_test.go create mode 100644 libs/textutil/case.go create mode 100644 libs/textutil/case_test.go diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 583efcfe5..cf8229bfe 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -97,6 +97,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle { RegisteredModels: map[string]*resources.RegisteredModel{ "registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}}, }, + QualityMonitors: map[string]*resources.QualityMonitor{ + "qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}}, + }, }, }, // Use AWS implementation for testing. 
@@ -145,6 +148,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Registered model 1 assert.Equal(t, "dev_lennart_registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) + + // Quality Monitor 1 + assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) } func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { @@ -200,6 +206,7 @@ func TestProcessTargetModeDefault(t *testing.T) { assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) + assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) } func TestProcessTargetModeProduction(t *testing.T) { @@ -240,6 +247,7 @@ func TestProcessTargetModeProduction(t *testing.T) { assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) + assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) } func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index c5b294b27..aecd1d17e 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -100,6 +100,16 @@ func validateRunAs(b *bundle.Bundle) error { } } + // Monitors do not support run_as in the API. + if len(b.Config.Resources.QualityMonitors) > 0 { + return errUnsupportedResourceTypeForRunAs{ + resourceType: "quality_monitors", + resourceLocation: b.Config.GetLocation("resources.quality_monitors"), + currentUser: b.Config.Workspace.CurrentUser.UserName, + runAsUser: identity, + } + } + return nil } diff --git a/bundle/config/mutator/run_as_test.go b/bundle/config/mutator/run_as_test.go index d6fb2939f..c57de847b 100644 --- a/bundle/config/mutator/run_as_test.go +++ b/bundle/config/mutator/run_as_test.go @@ -37,6 +37,7 @@ func allResourceTypes(t *testing.T) []string { "model_serving_endpoints", "models", "pipelines", + "quality_monitors", "registered_models", }, resourceTypes, diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 41ffc25cd..f70052ec0 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -17,6 +17,7 @@ type Resources struct { Experiments map[string]*resources.MlflowExperiment `json:"experiments,omitempty"` ModelServingEndpoints map[string]*resources.ModelServingEndpoint `json:"model_serving_endpoints,omitempty"` RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"` + QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"` } type UniqueResourceIdTracker struct { @@ -123,6 +124,19 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker, tracker.Type[k] = "registered_model" tracker.ConfigPath[k] = r.RegisteredModels[k].ConfigFilePath } + for k := range r.QualityMonitors { + if _, ok := tracker.Type[k]; ok { + return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)", + k, + tracker.Type[k], + tracker.ConfigPath[k], + "quality_monitor", + r.QualityMonitors[k].ConfigFilePath, + ) + } + 
tracker.Type[k] = "quality_monitor" + tracker.ConfigPath[k] = r.QualityMonitors[k].ConfigFilePath + } return tracker, nil } @@ -152,6 +166,9 @@ func (r *Resources) allResources() []resource { for k, e := range r.RegisteredModels { all = append(all, resource{resource_type: "registered model", resource: e, key: k}) } + for k, e := range r.QualityMonitors { + all = append(all, resource{resource_type: "quality monitor", resource: e, key: k}) + } return all } @@ -189,6 +206,9 @@ func (r *Resources) ConfigureConfigFilePath() { for _, e := range r.RegisteredModels { e.ConfigureConfigFilePath() } + for _, e := range r.QualityMonitors { + e.ConfigureConfigFilePath() + } } type ConfigResource interface { diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go new file mode 100644 index 000000000..0d13e58fa --- /dev/null +++ b/bundle/config/resources/quality_monitor.go @@ -0,0 +1,60 @@ +package resources + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/marshal" + "github.com/databricks/databricks-sdk-go/service/catalog" +) + +type QualityMonitor struct { + // Represents the Input Arguments for Terraform and will get + // converted to a HCL representation for CRUD + *catalog.CreateMonitor + + // This represents the id which is the full name of the monitor + // (catalog_name.schema_name.table_name) that can be used + // as a reference in other resources. This value is returned by terraform. + ID string `json:"id,omitempty" bundle:"readonly"` + + // Path to config file where the resource is defined. All bundle resources + // include this for interpolation purposes. + paths.Paths + + ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` +} + +func (s *QualityMonitor) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QualityMonitor) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ + TableName: id, + }) + if err != nil { + log.Debugf(ctx, "quality monitor %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *QualityMonitor) TerraformResourceName() string { + return "databricks_quality_monitor" +} + +func (s *QualityMonitor) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("quality monitor is not defined") + } + + return nil +} diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index d0b633582..a6ec04d9a 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -222,6 +222,13 @@ func BundleToTerraform(config *config.Root) *schema.Root { } } + for k, src := range config.Resources.QualityMonitors { + noResources = false + var dst schema.ResourceQualityMonitor + conv(src, &dst) + tfroot.Resource.QualityMonitor[k] = &dst + } + // We explicitly set "resource" to nil to omit it from a JSON encoding. // This is required because the terraform CLI requires >= 1 resources defined // if the "resource" property is used in a .tf.json file. 
@@ -365,6 +372,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error { } cur.ID = instance.Attributes.ID config.Resources.RegisteredModels[resource.Name] = cur + case "databricks_quality_monitor": + if config.Resources.QualityMonitors == nil { + config.Resources.QualityMonitors = make(map[string]*resources.QualityMonitor) + } + cur := config.Resources.QualityMonitors[resource.Name] + if cur == nil { + cur = &resources.QualityMonitor{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID + config.Resources.QualityMonitors[resource.Name] = cur case "databricks_permissions": case "databricks_grants": // Ignore; no need to pull these back into the configuration. @@ -404,6 +421,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error { src.ModifiedStatus = resources.ModifiedStatusCreated } } + for _, src := range config.Resources.QualityMonitors { + if src.ModifiedStatus == "" && src.ID == "" { + src.ModifiedStatus = resources.ModifiedStatusCreated + } + } return nil } diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 58523bb49..e1f73be28 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -629,6 +629,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { {Attributes: stateInstanceAttributes{ID: "1"}}, }, }, + { + Type: "databricks_quality_monitor", + Mode: "managed", + Name: "test_monitor", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, }, } err := TerraformToBundle(&tfState, &config) @@ -652,6 +660,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { assert.Equal(t, "1", config.Resources.RegisteredModels["test_registered_model"].ID) assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.RegisteredModels["test_registered_model"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.QualityMonitors["test_monitor"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.QualityMonitors["test_monitor"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } @@ -700,6 +711,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, }, + QualityMonitors: map[string]*resources.QualityMonitor{ + "test_monitor": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "test_monitor", + }, + }, + }, }, } var tfState = resourcesState{ @@ -726,6 +744,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { assert.Equal(t, "", config.Resources.RegisteredModels["test_registered_model"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.RegisteredModels["test_registered_model"].ModifiedStatus) + assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.QualityMonitors["test_monitor"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } @@ -804,6 +825,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, }, + QualityMonitors: map[string]*resources.QualityMonitor{ + "test_monitor": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "test_monitor", + }, + }, + "test_monitor_new": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "test_monitor_new", + }, + }, + }, }, } var tfState = resourcesState{ @@ -904,6 +937,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { {Attributes: stateInstanceAttributes{ID: "2"}}, }, }, + { + Type: 
"databricks_quality_monitor", + Mode: "managed", + Name: "test_monitor", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "test_monitor"}}, + }, + }, + { + Type: "databricks_quality_monitor", + Mode: "managed", + Name: "test_monitor_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "test_monitor_old"}}, + }, + }, }, } err := TerraformToBundle(&tfState, &config) @@ -951,6 +1000,12 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { assert.Equal(t, "", config.Resources.ModelServingEndpoints["test_model_serving_new"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.ModelServingEndpoints["test_model_serving_new"].ModifiedStatus) + assert.Equal(t, "test_monitor", config.Resources.QualityMonitors["test_monitor"].ID) + assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor"].ModifiedStatus) + assert.Equal(t, "test_monitor_old", config.Resources.QualityMonitors["test_monitor_old"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.QualityMonitors["test_monitor_old"].ModifiedStatus) + assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor_new"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.QualityMonitors["test_monitor_new"].ModifiedStatus) AssertFullResourceCoverage(t, &config) } diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index 358279a7a..608f1c795 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -54,6 +54,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D path = dyn.NewPath(dyn.Key("databricks_model_serving")).Append(path[2:]...) case dyn.Key("registered_models"): path = dyn.NewPath(dyn.Key("databricks_registered_model")).Append(path[2:]...) + case dyn.Key("quality_monitors"): + path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...) default: // Trigger "key not found" for unknown resource types. return dyn.GetByPath(root, path) diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go new file mode 100644 index 000000000..341df7c22 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go @@ -0,0 +1,37 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertQualityMonitorResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourceQualityMonitor{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "monitor normalization diagnostic: %s", diag.Summary) + } + return vout, nil +} + +type qualityMonitorConverter struct{} + +func (qualityMonitorConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertQualityMonitorResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. 
+ out.QualityMonitor[key] = vout.AsAny() + + return nil +} + +func init() { + registerConverter("quality_monitors", qualityMonitorConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go new file mode 100644 index 000000000..50bfce7a0 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go @@ -0,0 +1,46 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertQualityMonitor(t *testing.T) { + var src = resources.QualityMonitor{ + CreateMonitor: &catalog.CreateMonitor{ + TableName: "test_table_name", + AssetsDir: "assets_dir", + OutputSchemaName: "output_schema_name", + InferenceLog: &catalog.MonitorInferenceLog{ + ModelIdCol: "model_id", + PredictionCol: "test_prediction_col", + ProblemType: "PROBLEM_TYPE_CLASSIFICATION", + }, + }, + } + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + ctx := context.Background() + out := schema.NewResources() + err = qualityMonitorConverter{}.Convert(ctx, "my_monitor", vin, out) + + require.NoError(t, err) + assert.Equal(t, map[string]any{ + "assets_dir": "assets_dir", + "output_schema_name": "output_schema_name", + "table_name": "test_table_name", + "inference_log": map[string]any{ + "model_id_col": "model_id", + "prediction_col": "test_prediction_col", + "problem_type": "PROBLEM_TYPE_CLASSIFICATION", + }, + }, out.QualityMonitor["my_monitor"]) +} diff --git a/bundle/tests/quality_monitor/databricks.yml b/bundle/tests/quality_monitor/databricks.yml new file mode 100644 index 000000000..3abcdfdda --- /dev/null +++ b/bundle/tests/quality_monitor/databricks.yml @@ -0,0 +1,40 @@ +resources: + quality_monitors: + my_monitor: + table_name: "main.test.thing1" + assets_dir: "/Shared/provider-test/databricks_monitoring/main.test.thing1" + output_schema_name: "test" + inference_log: + granularities: ["1 day"] + timestamp_col: "timestamp" + prediction_col: "prediction" + model_id_col: "model_id" + problem_type: "PROBLEM_TYPE_REGRESSION" + +targets: + development: + mode: development + resources: + quality_monitors: + my_monitor: + table_name: "main.test.dev" + + staging: + resources: + quality_monitors: + my_monitor: + table_name: "main.test.staging" + output_schema_name: "staging" + + production: + resources: + quality_monitors: + my_monitor: + table_name: "main.test.prod" + output_schema_name: "prod" + inference_log: + granularities: ["1 hour"] + timestamp_col: "timestamp_prod" + prediction_col: "prediction_prod" + model_id_col: "model_id_prod" + problem_type: "PROBLEM_TYPE_REGRESSION" diff --git a/bundle/tests/quality_monitor_test.go b/bundle/tests/quality_monitor_test.go new file mode 100644 index 000000000..d5db05196 --- /dev/null +++ b/bundle/tests/quality_monitor_test.go @@ -0,0 +1,59 @@ +package config_tests + +import ( + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" +) + +func assertExpectedMonitor(t *testing.T, p *resources.QualityMonitor) { + assert.Equal(t, "timestamp", 
p.InferenceLog.TimestampCol) + assert.Equal(t, "prediction", p.InferenceLog.PredictionCol) + assert.Equal(t, "model_id", p.InferenceLog.ModelIdCol) + assert.Equal(t, catalog.MonitorInferenceLogProblemType("PROBLEM_TYPE_REGRESSION"), p.InferenceLog.ProblemType) +} + +func TestMonitorTableNames(t *testing.T) { + b := loadTarget(t, "./quality_monitor", "development") + assert.Len(t, b.Config.Resources.QualityMonitors, 1) + assert.Equal(t, b.Config.Bundle.Mode, config.Development) + + p := b.Config.Resources.QualityMonitors["my_monitor"] + assert.Equal(t, "main.test.dev", p.TableName) + assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) + assert.Equal(t, "test", p.OutputSchemaName) + + assertExpectedMonitor(t, p) +} + +func TestMonitorStaging(t *testing.T) { + b := loadTarget(t, "./quality_monitor", "staging") + assert.Len(t, b.Config.Resources.QualityMonitors, 1) + + p := b.Config.Resources.QualityMonitors["my_monitor"] + assert.Equal(t, "main.test.staging", p.TableName) + assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) + assert.Equal(t, "staging", p.OutputSchemaName) + + assertExpectedMonitor(t, p) +} + +func TestMonitorProduction(t *testing.T) { + b := loadTarget(t, "./quality_monitor", "production") + assert.Len(t, b.Config.Resources.QualityMonitors, 1) + + p := b.Config.Resources.QualityMonitors["my_monitor"] + assert.Equal(t, "main.test.prod", p.TableName) + assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) + assert.Equal(t, "prod", p.OutputSchemaName) + + inferenceLog := p.InferenceLog + assert.Equal(t, []string{"1 day", "1 hour"}, inferenceLog.Granularities) + assert.Equal(t, "timestamp_prod", p.InferenceLog.TimestampCol) + assert.Equal(t, "prediction_prod", p.InferenceLog.PredictionCol) + assert.Equal(t, "model_id_prod", p.InferenceLog.ModelIdCol) + assert.Equal(t, catalog.MonitorInferenceLogProblemType("PROBLEM_TYPE_REGRESSION"), p.InferenceLog.ProblemType) +} diff --git a/libs/dyn/convert/struct_info.go b/libs/dyn/convert/struct_info.go index dc3ed4da4..595e52edd 100644 --- a/libs/dyn/convert/struct_info.go +++ b/libs/dyn/convert/struct_info.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/textutil" ) // structInfo holds the type information we need to efficiently @@ -84,6 +85,14 @@ func buildStructInfo(typ reflect.Type) structInfo { } name, _, _ := strings.Cut(sf.Tag.Get("json"), ",") + if typ.Name() == "QualityMonitor" && name == "-" { + urlName, _, _ := strings.Cut(sf.Tag.Get("url"), ",") + if urlName == "" || urlName == "-" { + name = textutil.CamelToSnakeCase(sf.Name) + } else { + name = urlName + } + } if name == "" || name == "-" { continue } diff --git a/libs/textutil/case.go b/libs/textutil/case.go new file mode 100644 index 000000000..a8c780591 --- /dev/null +++ b/libs/textutil/case.go @@ -0,0 +1,14 @@ +package textutil + +import "unicode" + +func CamelToSnakeCase(name string) string { + var out []rune = make([]rune, 0, len(name)*2) + for i, r := range name { + if i > 0 && unicode.IsUpper(r) { + out = append(out, '_') + } + out = append(out, unicode.ToLower(r)) + } + return string(out) +} diff --git a/libs/textutil/case_test.go b/libs/textutil/case_test.go new file mode 100644 index 000000000..77b3e0679 --- /dev/null +++ b/libs/textutil/case_test.go @@ -0,0 +1,40 @@ +package textutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCamelToSnakeCase(t 
*testing.T) { + cases := []struct { + input string + expected string + }{ + { + input: "test", + expected: "test", + }, + { + input: "testTest", + expected: "test_test", + }, + { + input: "testTestTest", + expected: "test_test_test", + }, + { + input: "TestTest", + expected: "test_test", + }, + { + input: "TestTestTest", + expected: "test_test_test", + }, + } + + for _, c := range cases { + output := CamelToSnakeCase(c.input) + assert.Equal(t, c.expected, output) + } +} From 30fd84893f7bf5091128920e0aac5cd40e09404d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 3 Jun 2024 14:39:27 +0200 Subject: [PATCH 207/286] Generate bundle schema placeholder for quality monitors (#1465) ## Changes Generated with default generation command. The team is making a fix to ensure the proper comments are included later. ## Tests n/a --- bundle/schema/docs/bundle_descriptions.json | 300 ++++++++++++++++++++ 1 file changed, 300 insertions(+) diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index b6d0235aa..e9c9e71ed 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -2710,6 +2710,156 @@ } } }, + "quality_monitors": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "assets_dir": { + "description": "" + }, + "baseline_table_name": { + "description": "" + }, + "custom_metrics": { + "description": "", + "items": { + "description": "", + "properties": { + "definition": { + "description": "" + }, + "input_columns": { + "description": "", + "items": { + "description": "" + } + }, + "name": { + "description": "" + }, + "output_data_type": { + "description": "" + }, + "type": { + "description": "" + } + } + } + }, + "data_classification_config": { + "description": "", + "properties": { + "enabled": { + "description": "" + } + } + }, + "inference_log": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "label_col": { + "description": "" + }, + "model_id_col": { + "description": "" + }, + "prediction_col": { + "description": "" + }, + "prediction_proba_col": { + "description": "" + }, + "problem_type": { + "description": "" + }, + "timestamp_col": { + "description": "" + } + } + }, + "notifications": { + "description": "", + "properties": { + "on_failure": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + }, + "on_new_classification_tag_detected": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + } + } + }, + "output_schema_name": { + "description": "" + }, + "schedule": { + "description": "", + "properties": { + "pause_status": { + "description": "" + }, + "quartz_cron_expression": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "skip_builtin_dashboard": { + "description": "" + }, + "slicing_exprs": { + "description": "", + "items": { + "description": "" + } + }, + "snapshot": { + "description": "" + }, + "time_series": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "timestamp_col": { + "description": "" + } + } + }, + "warehouse_id": { + "description": "" + } + } + } + }, "registered_models": { "description": "List of Registered Models", "additionalproperties": { @@ -5491,6 +5641,156 @@ } } }, + 
"quality_monitors": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "assets_dir": { + "description": "" + }, + "baseline_table_name": { + "description": "" + }, + "custom_metrics": { + "description": "", + "items": { + "description": "", + "properties": { + "definition": { + "description": "" + }, + "input_columns": { + "description": "", + "items": { + "description": "" + } + }, + "name": { + "description": "" + }, + "output_data_type": { + "description": "" + }, + "type": { + "description": "" + } + } + } + }, + "data_classification_config": { + "description": "", + "properties": { + "enabled": { + "description": "" + } + } + }, + "inference_log": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "label_col": { + "description": "" + }, + "model_id_col": { + "description": "" + }, + "prediction_col": { + "description": "" + }, + "prediction_proba_col": { + "description": "" + }, + "problem_type": { + "description": "" + }, + "timestamp_col": { + "description": "" + } + } + }, + "notifications": { + "description": "", + "properties": { + "on_failure": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + }, + "on_new_classification_tag_detected": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + } + } + }, + "output_schema_name": { + "description": "" + }, + "schedule": { + "description": "", + "properties": { + "pause_status": { + "description": "" + }, + "quartz_cron_expression": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "skip_builtin_dashboard": { + "description": "" + }, + "slicing_exprs": { + "description": "", + "items": { + "description": "" + } + }, + "snapshot": { + "description": "" + }, + "time_series": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "timestamp_col": { + "description": "" + } + } + }, + "warehouse_id": { + "description": "" + } + } + } + }, "registered_models": { "description": "List of Registered Models", "additionalproperties": { From c9b4f119472962880c914759ac43278095010d6a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 3 Jun 2024 14:39:36 +0200 Subject: [PATCH 208/286] Update error checks that use the `os` package to use `errors.Is` (#1461) ## Changes From the [documentation](https://pkg.go.dev/os#IsNotExist) on the functions in the `os` package: > This function predates errors.Is. It only supports errors returned by the os package. > New code should use errors.Is(err, fs.ErrNotExist). This issue surfaced while working on using a different `vfs.Path` implementation that uses errors from the `fs` package. Calls to `os.IsNotExist` didn't return true for errors that wrap `fs.ErrNotExist`. 
## Tests n/a --- bundle/bundle_test.go | 4 +++- bundle/config/mutator/translate_paths.go | 5 +++-- bundle/deploy/files/delete.go | 4 +++- bundle/deploy/state_pull_test.go | 4 +++- bundle/deploy/state_update.go | 4 +++- bundle/deploy/terraform/init.go | 6 ++++-- bundle/internal/tf/codegen/schema/schema.go | 4 +++- cmd/auth/profiles.go | 5 +++-- libs/databrickscfg/loader.go | 4 ++-- libs/databrickscfg/ops.go | 4 +++- libs/filer/local_client.go | 17 +++++++++-------- libs/git/config.go | 10 ++++++---- libs/git/ignore.go | 4 ++-- libs/git/reference.go | 4 ++-- libs/git/repository.go | 5 +++-- libs/notebook/detect_test.go | 6 ++++-- libs/sync/snapshot.go | 6 ++++-- libs/template/materialize.go | 3 ++- libs/template/renderer.go | 3 ++- 19 files changed, 64 insertions(+), 38 deletions(-) diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index 908b446e2..a29aa024b 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -2,6 +2,8 @@ package bundle import ( "context" + "errors" + "io/fs" "os" "path/filepath" "testing" @@ -14,7 +16,7 @@ import ( func TestLoadNotExists(t *testing.T) { b, err := Load(context.Background(), "/doesntexist") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) assert.Nil(t, b) } diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 18a09dfd6..d9ab9e9e8 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io/fs" "net/url" "os" "path" @@ -109,7 +110,7 @@ func (m *translatePaths) rewritePath( func translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { nb, _, err := notebook.Detect(localFullPath) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("notebook %s not found", literal) } if err != nil { @@ -125,7 +126,7 @@ func translateNotebookPath(literal, localFullPath, localRelPath, remotePath stri func translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { nb, _, err := notebook.Detect(localFullPath) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("file %s not found", literal) } if err != nil { diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go index 066368a6b..133971449 100644 --- a/bundle/deploy/files/delete.go +++ b/bundle/deploy/files/delete.go @@ -2,7 +2,9 @@ package files import ( "context" + "errors" "fmt" + "io/fs" "os" "github.com/databricks/cli/bundle" @@ -67,7 +69,7 @@ func deleteSnapshotFile(ctx context.Context, b *bundle.Bundle) error { return err } err = os.Remove(sp) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("failed to destroy sync snapshot file: %s", err) } return nil diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index bcb88374f..409895a25 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -4,7 +4,9 @@ import ( "bytes" "context" "encoding/json" + "errors" "io" + "io/fs" "os" "testing" @@ -270,7 +272,7 @@ func TestStatePullNoState(t *testing.T) { require.NoError(t, err) _, err = os.Stat(statePath) - require.True(t, os.IsNotExist(err)) + require.True(t, errors.Is(err, fs.ErrNotExist)) } func TestStatePullOlderState(t *testing.T) { diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index 885e47a7a..6903a9f87 100644 --- 
a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -4,7 +4,9 @@ import ( "bytes" "context" "encoding/json" + "errors" "io" + "io/fs" "os" "time" @@ -95,7 +97,7 @@ func load(ctx context.Context, b *bundle.Bundle) (*DeploymentState, error) { log.Infof(ctx, "Loading deployment state from %s", statePath) f, err := os.Open(statePath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { log.Infof(ctx, "No deployment state file found") return &DeploymentState{ Version: DeploymentStateVersion, diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 69ae70ba6..d1847cf24 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -2,7 +2,9 @@ package terraform import ( "context" + "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -59,7 +61,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con // If the execPath already exists, return it. execPath := filepath.Join(binDir, product.Terraform.BinaryName()) _, err = os.Stat(execPath) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return "", err } if err == nil { @@ -148,7 +150,7 @@ func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versio // If the path does not exist, we return early. _, err := os.Stat(envValue) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { log.Debugf(ctx, "%s at %s does not exist", envVarName, envValue) return "", nil } else { diff --git a/bundle/internal/tf/codegen/schema/schema.go b/bundle/internal/tf/codegen/schema/schema.go index 534da4a02..f94b94f04 100644 --- a/bundle/internal/tf/codegen/schema/schema.go +++ b/bundle/internal/tf/codegen/schema/schema.go @@ -2,6 +2,8 @@ package schema import ( "context" + "errors" + "io/fs" "os" "path/filepath" @@ -41,7 +43,7 @@ func Load(ctx context.Context) (*tfjson.ProviderSchema, error) { } // Generate schema file if it doesn't exist. 
- if _, err := os.Stat(s.ProviderSchemaFile); os.IsNotExist(err) { + if _, err := os.Stat(s.ProviderSchemaFile); errors.Is(err, fs.ErrNotExist) { err = s.Generate(ctx) if err != nil { return nil, err diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 61a6c1f33..2fc8a314b 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -2,8 +2,9 @@ package auth import ( "context" + "errors" "fmt" - "os" + "io/fs" "sync" "time" @@ -95,7 +96,7 @@ func newProfilesCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { var profiles []*profileMetadata iniFile, err := profile.DefaultProfiler.Get(cmd.Context()) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { // return empty list for non-configured machines iniFile = &config.File{ File: &ini.File{}, diff --git a/libs/databrickscfg/loader.go b/libs/databrickscfg/loader.go index 2e22ee950..12a516c59 100644 --- a/libs/databrickscfg/loader.go +++ b/libs/databrickscfg/loader.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "os" + "io/fs" "strings" "github.com/databricks/cli/libs/log" @@ -68,7 +68,7 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { ctx := context.Background() configFile, err := config.LoadFile(cfg.ConfigFile) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } return fmt.Errorf("cannot parse config file: %w", err) diff --git a/libs/databrickscfg/ops.go b/libs/databrickscfg/ops.go index 90795afd5..6a1c182af 100644 --- a/libs/databrickscfg/ops.go +++ b/libs/databrickscfg/ops.go @@ -2,7 +2,9 @@ package databrickscfg import ( "context" + "errors" "fmt" + "io/fs" "os" "strings" @@ -29,7 +31,7 @@ func loadOrCreateConfigFile(filename string) (*config.File, error) { filename = fmt.Sprintf("%s%s", homedir, filename[1:]) } configFile, err := config.LoadFile(filename) - if err != nil && os.IsNotExist(err) { + if err != nil && errors.Is(err, fs.ErrNotExist) { file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileMode) if err != nil { return nil, fmt.Errorf("create %s: %w", filename, err) diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 9398958f5..48e8a05ee 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -2,6 +2,7 @@ package filer import ( "context" + "errors" "io" "io/fs" "os" @@ -35,7 +36,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, } f, err := os.OpenFile(absPath, flags, 0644) - if os.IsNotExist(err) && slices.Contains(mode, CreateParentDirectories) { + if errors.Is(err, fs.ErrNotExist) && slices.Contains(mode, CreateParentDirectories) { // Create parent directories if they don't exist. err = os.MkdirAll(filepath.Dir(absPath), 0755) if err != nil { @@ -47,9 +48,9 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, if err != nil { switch { - case os.IsNotExist(err): + case errors.Is(err, fs.ErrNotExist): return NoSuchDirectoryError{path: absPath} - case os.IsExist(err): + case errors.Is(err, fs.ErrExist): return FileAlreadyExistsError{path: absPath} default: return err @@ -77,7 +78,7 @@ func (w *LocalClient) Read(ctx context.Context, name string) (io.ReadCloser, err // 2. 
Allows us to error out if the path is a directory stat, err := os.Stat(absPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, FileDoesNotExistError{path: absPath} } return nil, err @@ -108,11 +109,11 @@ func (w *LocalClient) Delete(ctx context.Context, name string, mode ...DeleteMod return nil } - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return FileDoesNotExistError{path: absPath} } - if os.IsExist(err) { + if errors.Is(err, fs.ErrExist) { if slices.Contains(mode, DeleteRecursively) { return os.RemoveAll(absPath) } @@ -130,7 +131,7 @@ func (w *LocalClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, stat, err := os.Stat(absPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, NoSuchDirectoryError{path: absPath} } return nil, err @@ -159,7 +160,7 @@ func (w *LocalClient) Stat(ctx context.Context, name string) (fs.FileInfo, error } stat, err := os.Stat(absPath) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, FileDoesNotExistError{path: absPath} } return stat, err diff --git a/libs/git/config.go b/libs/git/config.go index 424d453bc..fafd81bd6 100644 --- a/libs/git/config.go +++ b/libs/git/config.go @@ -1,8 +1,10 @@ package git import ( + "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "regexp" @@ -88,12 +90,12 @@ func (c config) load(r io.Reader) error { return nil } -func (c config) loadFile(fs vfs.Path, path string) error { - f, err := fs.Open(path) +func (c config) loadFile(root vfs.Path, path string) error { + f, err := root.Open(path) if err != nil { // If the file doesn't exist it is ignored. // This is the case for both global and repository specific config files. - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } return err @@ -130,7 +132,7 @@ func (c config) coreExcludesFile() (string, error) { // If there are other problems accessing this file we would // run into them at a later point anyway. _, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return "", err } diff --git a/libs/git/ignore.go b/libs/git/ignore.go index df3a4e919..9f501e472 100644 --- a/libs/git/ignore.go +++ b/libs/git/ignore.go @@ -1,8 +1,8 @@ package git import ( + "errors" "io/fs" - "os" "strings" "time" @@ -74,7 +74,7 @@ func (f *ignoreFile) load() error { // If it doesn't exist, treat it as an empty file. 
stat, err := fs.Stat(f.root, f.path) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } return err diff --git a/libs/git/reference.go b/libs/git/reference.go index 2b4bd3e4d..2165a9cda 100644 --- a/libs/git/reference.go +++ b/libs/git/reference.go @@ -1,9 +1,9 @@ package git import ( + "errors" "fmt" "io/fs" - "os" "regexp" "strings" @@ -42,7 +42,7 @@ func isSHA1(s string) bool { func LoadReferenceFile(root vfs.Path, path string) (*Reference, error) { // read reference file content b, err := fs.ReadFile(root, path) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, nil } if err != nil { diff --git a/libs/git/repository.go b/libs/git/repository.go index 6baf26c2e..86d56a7fc 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -1,8 +1,9 @@ package git import ( + "errors" "fmt" - "os" + "io/fs" "path" "path/filepath" "strings" @@ -190,7 +191,7 @@ func NewRepository(path vfs.Path) (*Repository, error) { real := true rootPath, err := vfs.FindLeafInTree(path, GitDirectoryName) if err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, fs.ErrNotExist) { return nil, err } // Cannot find `.git` directory. diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go index 5d3aa8a81..fd3337579 100644 --- a/libs/notebook/detect_test.go +++ b/libs/notebook/detect_test.go @@ -1,6 +1,8 @@ package notebook import ( + "errors" + "io/fs" "os" "path/filepath" "testing" @@ -50,7 +52,7 @@ func TestDetectCallsDetectJupyter(t *testing.T) { func TestDetectUnknownExtension(t *testing.T) { _, _, err := Detect("./testdata/doesntexist.foobar") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) nb, _, err := Detect("./testdata/unknown_extension.foobar") require.NoError(t, err) @@ -59,7 +61,7 @@ func TestDetectUnknownExtension(t *testing.T) { func TestDetectNoExtension(t *testing.T) { _, _, err := Detect("./testdata/doesntexist") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) nb, _, err := Detect("./testdata/no_extension") require.NoError(t, err) diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index 392e274d4..b46bd19f4 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -3,7 +3,9 @@ package sync import ( "context" "encoding/json" + "errors" "fmt" + "io/fs" "os" "path/filepath" "time" @@ -88,7 +90,7 @@ func GetFileName(host, remotePath string) string { // precisely it's the first 16 characters of md5(concat(host, remotePath)) func SnapshotPath(opts *SyncOptions) (string, error) { snapshotDir := filepath.Join(opts.SnapshotBasePath, syncSnapshotDirName) - if _, err := os.Stat(snapshotDir); os.IsNotExist(err) { + if _, err := os.Stat(snapshotDir); errors.Is(err, fs.ErrNotExist) { err = os.MkdirAll(snapshotDir, 0755) if err != nil { return "", fmt.Errorf("failed to create config directory: %s", err) @@ -145,7 +147,7 @@ func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error } // Snapshot file not found. We return the new copy. 
- if _, err := os.Stat(snapshot.SnapshotPath); os.IsNotExist(err) { + if _, err := os.Stat(snapshot.SnapshotPath); errors.Is(err, fs.ErrNotExist) { return snapshot, nil } diff --git a/libs/template/materialize.go b/libs/template/materialize.go index 811ef9259..04f4c8f0c 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -3,6 +3,7 @@ package template import ( "context" "embed" + "errors" "fmt" "io/fs" "os" @@ -44,7 +45,7 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st schemaPath := filepath.Join(templateRoot, schemaFileName) helpers := loadHelpers(ctx) - if _, err := os.Stat(schemaPath); os.IsNotExist(err) { + if _, err := os.Stat(schemaPath); errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("not a bundle template: expected to find a template schema file at %s", schemaPath) } diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 6415cd84a..827f30133 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path" "path/filepath" @@ -313,7 +314,7 @@ func (r *renderer) persistToDisk() error { if err == nil { return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path) } - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("error while verifying file %s does not already exist: %w", path, err) } } From 70fd8ad3d7d36048d11472b02c3cb0705664693a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 3 Jun 2024 16:14:48 +0200 Subject: [PATCH 209/286] Update OpenAPI spec (#1466) ## Changes Notable changes: * Pagination of account-level storage credentials * Rename app deployment method Go SDK release notes: https://github.com/databricks/databricks-sdk-go/releases/tag/v0.42.0 ## Tests * Nightlies pass. --- .codegen/_openapi_sha | 2 +- bundle/schema/docs/bundle_descriptions.json | 4 +- .../storage-credentials.go | 7 +- cmd/workspace/apps/apps.go | 259 +++++++++--------- cmd/workspace/clusters/clusters.go | 2 + .../consumer-listings/consumer-listings.go | 57 ++++ .../consumer-providers/consumer-providers.go | 57 ++++ go.mod | 18 +- go.sum | 44 +-- 9 files changed, 288 insertions(+), 162 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 8c62ac620..de0f45ab9 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92 \ No newline at end of file +37b925eba37dfb3d7e05b6ba2d458454ce62d3a0 \ No newline at end of file diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index e9c9e71ed..ab948b8b7 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -1915,7 +1915,7 @@ "description": "ARN of the instance profile that the served model will use to access AWS resources." }, "model_name": { - "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, \nin the form of __catalog_name__.__schema_name__.__model_name__.\n" + "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,\nin the form of __catalog_name__.__schema_name__.__model_name__.\n" }, "model_version": { "description": "The version of the model in Databricks Model Registry or Unity Catalog to be served." 
@@ -4846,7 +4846,7 @@ "description": "ARN of the instance profile that the served model will use to access AWS resources." }, "model_name": { - "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, \nin the form of __catalog_name__.__schema_name__.__model_name__.\n" + "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,\nin the form of __catalog_name__.__schema_name__.__model_name__.\n" }, "model_version": { "description": "The version of the model in Databricks Model Registry or Unity Catalog to be served." diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index 0a20b86b6..4280ae8c3 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -279,11 +279,8 @@ func newList() *cobra.Command { listReq.MetastoreId = args[0] - response, err := a.StorageCredentials.List(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.StorageCredentials.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 1d6de4775..46568e521 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -35,8 +35,8 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) - cmd.AddCommand(newCreateDeployment()) cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeploy()) cmd.AddCommand(newGet()) cmd.AddCommand(newGetDeployment()) cmd.AddCommand(newGetEnvironment()) @@ -79,15 +79,14 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `The description of the app.`) cmd.Use = "create NAME" - cmd.Short = `Create an App.` - cmd.Long = `Create an App. + cmd.Short = `Create an app.` + cmd.Long = `Create an app. Creates a new app. Arguments: NAME: The name of the app. The name must contain only lowercase alphanumeric - characters and hyphens and be between 2 and 30 characters long. It must be - unique within the workspace.` + characters and hyphens. It must be unique within the workspace.` cmd.Annotations = make(map[string]string) @@ -156,107 +155,6 @@ func newCreate() *cobra.Command { return cmd } -// start create-deployment command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var createDeploymentOverrides []func( - *cobra.Command, - *serving.CreateAppDeploymentRequest, -) - -func newCreateDeployment() *cobra.Command { - cmd := &cobra.Command{} - - var createDeploymentReq serving.CreateAppDeploymentRequest - var createDeploymentJson flags.JsonFlag - - var createDeploymentSkipWait bool - var createDeploymentTimeout time.Duration - - cmd.Flags().BoolVar(&createDeploymentSkipWait, "no-wait", createDeploymentSkipWait, `do not wait to reach SUCCEEDED state`) - cmd.Flags().DurationVar(&createDeploymentTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`) - // TODO: short flags - cmd.Flags().Var(&createDeploymentJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Use = "create-deployment APP_NAME SOURCE_CODE_PATH" - cmd.Short = `Create an App Deployment.` - cmd.Long = `Create an App Deployment. - - Creates an app deployment for the app with the supplied name. - - Arguments: - APP_NAME: The name of the app. - SOURCE_CODE_PATH: The source code path of the deployment.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(1)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'source_code_path' in your JSON input") - } - return nil - } - check := root.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - err = createDeploymentJson.Unmarshal(&createDeploymentReq) - if err != nil { - return err - } - } - createDeploymentReq.AppName = args[0] - if !cmd.Flags().Changed("json") { - createDeploymentReq.SourceCodePath = args[1] - } - - wait, err := w.Apps.CreateDeployment(ctx, createDeploymentReq) - if err != nil { - return err - } - if createDeploymentSkipWait { - return cmdio.Render(ctx, wait.Response) - } - spinner := cmdio.Spinner(ctx) - info, err := wait.OnProgress(func(i *serving.AppDeployment) { - if i.Status == nil { - return - } - status := i.Status.State - statusMessage := fmt.Sprintf("current status: %s", status) - if i.Status != nil { - statusMessage = i.Status.Message - } - spinner <- statusMessage - }).GetWithTimeout(createDeploymentTimeout) - close(spinner) - if err != nil { - return err - } - return cmdio.Render(ctx, info) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range createDeploymentOverrides { - fn(cmd, &createDeploymentReq) - } - - return cmd -} - // start delete command // Slice with functions to override default command behavior. @@ -274,8 +172,8 @@ func newDelete() *cobra.Command { // TODO: short flags cmd.Use = "delete NAME" - cmd.Short = `Delete an App.` - cmd.Long = `Delete an App. + cmd.Short = `Delete an app.` + cmd.Long = `Delete an app. Deletes an app. @@ -315,6 +213,120 @@ func newDelete() *cobra.Command { return cmd } +// start deploy command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deployOverrides []func( + *cobra.Command, + *serving.CreateAppDeploymentRequest, +) + +func newDeploy() *cobra.Command { + cmd := &cobra.Command{} + + var deployReq serving.CreateAppDeploymentRequest + var deployJson flags.JsonFlag + + var deploySkipWait bool + var deployTimeout time.Duration + + cmd.Flags().BoolVar(&deploySkipWait, "no-wait", deploySkipWait, `do not wait to reach SUCCEEDED state`) + cmd.Flags().DurationVar(&deployTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`) + // TODO: short flags + cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "deploy APP_NAME SOURCE_CODE_PATH MODE" + cmd.Short = `Create an app deployment.` + cmd.Long = `Create an app deployment. + + Creates an app deployment for the app with the supplied name. + + Arguments: + APP_NAME: The name of the app. + SOURCE_CODE_PATH: The workspace file system path of the source code used to create the app + deployment. This is different from + deployment_artifacts.source_code_path, which is the path used by the + deployed app. The former refers to the original source code location of + the app in the workspace during deployment creation, whereas the latter + provides a system generated stable snapshotted source code path used by + the deployment. + MODE: The mode of which the deployment will manage the source code.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'source_code_path', 'mode' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = deployJson.Unmarshal(&deployReq) + if err != nil { + return err + } + } + deployReq.AppName = args[0] + if !cmd.Flags().Changed("json") { + deployReq.SourceCodePath = args[1] + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &deployReq.Mode) + if err != nil { + return fmt.Errorf("invalid MODE: %s", args[2]) + } + } + + wait, err := w.Apps.Deploy(ctx, deployReq) + if err != nil { + return err + } + if deploySkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *serving.AppDeployment) { + if i.Status == nil { + return + } + status := i.Status.State + statusMessage := fmt.Sprintf("current status: %s", status) + if i.Status != nil { + statusMessage = i.Status.Message + } + spinner <- statusMessage + }).GetWithTimeout(deployTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deployOverrides { + fn(cmd, &deployReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. @@ -332,8 +344,8 @@ func newGet() *cobra.Command { // TODO: short flags cmd.Use = "get NAME" - cmd.Short = `Get an App.` - cmd.Long = `Get an App. 
+ cmd.Short = `Get an app.` + cmd.Long = `Get an app. Retrieves information for the app with the supplied name. @@ -390,8 +402,8 @@ func newGetDeployment() *cobra.Command { // TODO: short flags cmd.Use = "get-deployment APP_NAME DEPLOYMENT_ID" - cmd.Short = `Get an App Deployment.` - cmd.Long = `Get an App Deployment. + cmd.Short = `Get an app deployment.` + cmd.Long = `Get an app deployment. Retrieves information for the app deployment with the supplied name and deployment id. @@ -451,8 +463,8 @@ func newGetEnvironment() *cobra.Command { // TODO: short flags cmd.Use = "get-environment NAME" - cmd.Short = `Get App Environment.` - cmd.Long = `Get App Environment. + cmd.Short = `Get app environment.` + cmd.Long = `Get app environment. Retrieves app environment. @@ -512,8 +524,8 @@ func newList() *cobra.Command { cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Pagination token to go to the next page of apps.`) cmd.Use = "list" - cmd.Short = `List Apps.` - cmd.Long = `List Apps. + cmd.Short = `List apps.` + cmd.Long = `List apps. Lists all apps in the workspace.` @@ -565,8 +577,8 @@ func newListDeployments() *cobra.Command { cmd.Flags().StringVar(&listDeploymentsReq.PageToken, "page-token", listDeploymentsReq.PageToken, `Pagination token to go to the next page of apps.`) cmd.Use = "list-deployments APP_NAME" - cmd.Short = `List App Deployments.` - cmd.Long = `List App Deployments. + cmd.Short = `List app deployments.` + cmd.Long = `List app deployments. Lists all app deployments for the app with the supplied name. @@ -620,8 +632,8 @@ func newStop() *cobra.Command { // TODO: short flags cmd.Use = "stop NAME" - cmd.Short = `Stop an App.` - cmd.Long = `Stop an App. + cmd.Short = `Stop an app.` + cmd.Long = `Stop an app. Stops the active deployment of the app in the workspace. @@ -682,15 +694,14 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `The description of the app.`) cmd.Use = "update NAME" - cmd.Short = `Update an App.` - cmd.Long = `Update an App. + cmd.Short = `Update an app.` + cmd.Long = `Update an app. Updates the app with the supplied name. Arguments: NAME: The name of the app. The name must contain only lowercase alphanumeric - characters and hyphens and be between 2 and 30 characters long. It must be - unique within the workspace.` + characters and hyphens. It must be unique within the workspace.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index f4baab3b2..abde1bb71 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -200,6 +200,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, + LEGACY_SINGLE_USER_STANDARD, LEGACY_TABLE_ACL, NONE, SINGLE_USER, @@ -445,6 +446,7 @@ func newEdit() *cobra.Command { cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. 
Supported values: [ LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, + LEGACY_SINGLE_USER_STANDARD, LEGACY_TABLE_ACL, NONE, SINGLE_USER, diff --git a/cmd/workspace/consumer-listings/consumer-listings.go b/cmd/workspace/consumer-listings/consumer-listings.go index 8669dfae5..18f3fb39e 100755 --- a/cmd/workspace/consumer-listings/consumer-listings.go +++ b/cmd/workspace/consumer-listings/consumer-listings.go @@ -31,6 +31,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newBatchGet()) cmd.AddCommand(newGet()) cmd.AddCommand(newList()) cmd.AddCommand(newSearch()) @@ -43,6 +44,62 @@ func New() *cobra.Command { return cmd } +// start batch-get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var batchGetOverrides []func( + *cobra.Command, + *marketplace.BatchGetListingsRequest, +) + +func newBatchGet() *cobra.Command { + cmd := &cobra.Command{} + + var batchGetReq marketplace.BatchGetListingsRequest + + // TODO: short flags + + // TODO: array: ids + + cmd.Use = "batch-get" + cmd.Short = `Get one batch of listings.` + cmd.Long = `Get one batch of listings. One may specify up to 50 IDs per request. + + Batch get a published listing in the Databricks Marketplace that the consumer + has access to.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.ConsumerListings.BatchGet(ctx, batchGetReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range batchGetOverrides { + fn(cmd, &batchGetReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/consumer-providers/consumer-providers.go b/cmd/workspace/consumer-providers/consumer-providers.go index d8ac0ec12..579a89516 100755 --- a/cmd/workspace/consumer-providers/consumer-providers.go +++ b/cmd/workspace/consumer-providers/consumer-providers.go @@ -30,6 +30,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newBatchGet()) cmd.AddCommand(newGet()) cmd.AddCommand(newList()) @@ -41,6 +42,62 @@ func New() *cobra.Command { return cmd } +// start batch-get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var batchGetOverrides []func( + *cobra.Command, + *marketplace.BatchGetProvidersRequest, +) + +func newBatchGet() *cobra.Command { + cmd := &cobra.Command{} + + var batchGetReq marketplace.BatchGetProvidersRequest + + // TODO: short flags + + // TODO: array: ids + + cmd.Use = "batch-get" + cmd.Short = `Get one batch of providers.` + cmd.Long = `Get one batch of providers. One may specify up to 50 IDs per request. 
+ + Batch get a provider in the Databricks Marketplace with at least one visible + listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.ConsumerProviders.BatchGet(ctx, batchGetReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range batchGetOverrides { + fn(cmd, &batchGetReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. diff --git a/go.mod b/go.mod index 1b6c9aeb3..fadeabd6f 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.41.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.42.0 // Apache 2.0 github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause @@ -32,6 +32,8 @@ require ( ) require ( + cloud.google.com/go/auth v0.4.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -42,7 +44,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect @@ -57,13 +59,13 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.22.0 // indirect - golang.org/x/net v0.24.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.169.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect - google.golang.org/grpc v1.62.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/api v0.182.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 723057ad9..7e5dd4bbe 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= +cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= +cloud.google.com/go/auth/oauth2adapt v0.2.2 
h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -28,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.41.0 h1:OyhYY+Q6+gqkWeXmpGEiacoU2RStTeWPF0x4vmqbQdc= -github.com/databricks/databricks-sdk-go v0.41.0/go.mod h1:rLIhh7DvifVLmf2QxMr/vMRGqdrTZazn8VYo4LilfCo= +github.com/databricks/databricks-sdk-go v0.42.0 h1:WKdoqnvb+jvsR9+IYkC3P4BH5eJHRzVOr59y3mCoY+s= +github.com/databricks/databricks-sdk-go v0.42.0/go.mod h1:a9rr0FOHLL26kOjQjZZVFjIYmRABCbrAWVeundDEVG8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -71,9 +75,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -81,7 +84,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -93,8 +95,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= 
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= @@ -170,8 +172,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -186,8 +188,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -224,22 +226,22 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= -google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= +google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -249,10 +251,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= From aa36aee15995d1fb98ac6eb6889679479ed91bdd Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Tue, 4 Jun 2024 10:57:13 +0200 Subject: [PATCH 210/286] Make dbt-sql and default-sql templates public 
(#1463) ## Changes This makes the dbt-sql and default-sql templates public. These templates were previously not listed and marked "experimental" since structured streaming tables were still in gated preview and would result in weird error messages when a workspace wasn't enabled for the preview. This PR also incorporates some of the feedback and learnings for these templates so far. --- cmd/bundle/init.go | 4 +--- cmd/bundle/init_test.go | 4 ++++ libs/template/materialize.go | 16 ++++++++------ .../dbt-sql/databricks_template_schema.json | 2 +- .../dbt_profiles/profiles.yml.tmpl | 21 +++++++++++++------ .../resources/{{.project_name}}_job.yml.tmpl | 16 ++++++++------ .../databricks_template_schema.json | 2 +- .../src/orders_daily.sql.tmpl | 18 +++++++++++----- .../{{.project_name}}/src/orders_raw.sql.tmpl | 5 ++++- 9 files changed, 59 insertions(+), 29 deletions(-) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 6845ab672..c8c59c149 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -38,12 +38,10 @@ var nativeTemplates = []nativeTemplate{ { name: "default-sql", description: "The default SQL template for .sql files that run with Databricks SQL", - hidden: true, }, { name: "dbt-sql", - description: "The dbt SQL template (https://www.databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)", - hidden: true, + description: "The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)", }, { name: "mlops-stacks", diff --git a/cmd/bundle/init_test.go b/cmd/bundle/init_test.go index aa8991596..475b2e149 100644 --- a/cmd/bundle/init_test.go +++ b/cmd/bundle/init_test.go @@ -30,6 +30,8 @@ func TestBundleInitRepoName(t *testing.T) { func TestNativeTemplateOptions(t *testing.T) { expected := []cmdio.Tuple{ {Name: "default-python", Id: "The default Python template for Notebooks / Delta Live Tables / Workflows"}, + {Name: "default-sql", Id: "The default SQL template for .sql files that run with Databricks SQL"}, + {Name: "dbt-sql", Id: "The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)"}, {Name: "mlops-stacks", Id: "The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)"}, {Name: "custom...", Id: "Bring your own template"}, } @@ -38,6 +40,8 @@ func TestNativeTemplateOptions(t *testing.T) { func TestNativeTemplateHelpDescriptions(t *testing.T) { expected := `- default-python: The default Python template for Notebooks / Delta Live Tables / Workflows +- default-sql: The default SQL template for .sql files that run with Databricks SQL +- dbt-sql: The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks) - mlops-stacks: The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)` assert.Equal(t, expected, nativeTemplateHelpDescriptions()) } diff --git a/libs/template/materialize.go b/libs/template/materialize.go index 04f4c8f0c..d824bf381 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -54,12 +54,6 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st return err } - // Print welcome message - welcome := config.schema.WelcomeMessage - if welcome != "" { - cmdio.LogString(ctx, welcome) - } - // Read and assign config values from file if configFilePath != "" { err = config.assignValuesFromFile(configFilePath) @@ -73,6 +67,16 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st return err } + // Print 
welcome message + welcome := config.schema.WelcomeMessage + if welcome != "" { + welcome, err = r.executeTemplate(welcome) + if err != nil { + return err + } + cmdio.LogString(ctx, welcome) + } + // Prompt user for any missing config values. Assign default values if // terminal is not TTY err = config.promptOrAssignDefaultValues(r) diff --git a/libs/template/templates/dbt-sql/databricks_template_schema.json b/libs/template/templates/dbt-sql/databricks_template_schema.json index 7b39f6187..7fc353521 100644 --- a/libs/template/templates/dbt-sql/databricks_template_schema.json +++ b/libs/template/templates/dbt-sql/databricks_template_schema.json @@ -1,5 +1,5 @@ { - "welcome_message": "\nWelcome to the (EXPERIMENTAL) dbt template for Databricks Asset Bundles!", + "welcome_message": "\nWelcome to the dbt template for Databricks Asset Bundles!\n\nWorkspace selected based on your current profile (see https://docs.databricks.com/dev-tools/cli/profiles.html for how to change this).\nworkspace_host: {{workspace_host}}", "properties": { "project_name": { "type": "string", diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl index d29bd55ce..cce80f8d4 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl @@ -3,26 +3,35 @@ {{- $catalog = "\"\" # workspace default"}} {{- end}} # This file defines dbt profiles for deployed dbt jobs. -# Note that for local development you should create your own, local profile. -# (see README.md). my_dbt_project: target: dev # default target outputs: - dev: + # Doing local development with the dbt CLI? + # Then you should create your own profile in your .dbt/profiles.yml using 'dbt init' + # (See README.md) + + # The default target when deployed with the Databricks CLI + # N.B. 
when you use dbt from the command line, it uses the profile from .dbt/profiles.yml + dev: type: databricks method: http catalog: {{$catalog}} +{{- if (regexp "^yes").MatchString .personal_schemas}} schema: "{{"{{"}} var('dev_schema') {{"}}"}}" +{{- else}} + schema: "{{.shared_schema}}" +{{- end}} http_path: {{.http_path}} # The workspace host / token are provided by Databricks - # see databricks.yml for the host used for 'dev' + # see databricks.yml for the workspace host used for 'dev' host: "{{"{{"}} env_var('DBT_HOST') {{"}}"}}" token: "{{"{{"}} env_var('DBT_ACCESS_TOKEN') {{"}}"}}" - prod: + # The production target when deployed with the Databricks CLI + prod: type: databricks method: http catalog: {{$catalog}} @@ -31,6 +40,6 @@ my_dbt_project: http_path: {{.http_path}} # The workspace host / token are provided by Databricks - # see databricks.yml for the host used for 'dev' + # see databricks.yml for the workspace host used for 'prod' host: "{{"{{"}} env_var('DBT_HOST') {{"}}"}}" token: "{{"{{"}} env_var('DBT_ACCESS_TOKEN') {{"}}"}}" diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl index 688c23b92..acf1aa480 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -12,10 +12,6 @@ resources: on_failure: - {{user_name}} -{{- $dev_schema := .shared_schema }} -{{- if (regexp "^yes").MatchString .personal_schemas}} -{{- $dev_schema = "${workspace.current_user.short_name}"}} -{{- end}} tasks: - task_key: dbt @@ -25,9 +21,17 @@ resources: # The default schema, catalog, etc. 
are defined in ../dbt_profiles/profiles.yml profiles_directory: dbt_profiles/ commands: +{{- if (regexp "^yes").MatchString .personal_schemas}} + # The dbt commands to run (see also dbt_profiles/profiles.yml; dev_schema is used in the dev profile) - 'dbt deps --target=${bundle.target}' - - 'dbt seed --target=${bundle.target} --vars "{ dev_schema: {{$dev_schema}} }"' - - 'dbt run --target=${bundle.target} --vars "{ dev_schema: {{$dev_schema}} }"' + - 'dbt seed --target=${bundle.target} --vars "{ dev_schema: ${workspace.current_user.short_name} }"' + - 'dbt run --target=${bundle.target} --vars "{ dev_schema: ${workspace.current_user.short_name} }"' +{{- else}} + # The dbt commands to run (see also the dev/prod profiles in dbt_profiles/profiles.yml) + - 'dbt deps --target=${bundle.target}' + - 'dbt seed --target=${bundle.target}' + - 'dbt run --target=${bundle.target}' +{{- end}} libraries: - pypi: diff --git a/libs/template/templates/default-sql/databricks_template_schema.json b/libs/template/templates/default-sql/databricks_template_schema.json index b7a42e198..aacd6a0af 100644 --- a/libs/template/templates/default-sql/databricks_template_schema.json +++ b/libs/template/templates/default-sql/databricks_template_schema.json @@ -1,5 +1,5 @@ { - "welcome_message": "\nWelcome to the (EXPERIMENTAL) default SQL template for Databricks Asset Bundles!", + "welcome_message": "\nWelcome to the default SQL template for Databricks Asset Bundles!\n\nWorkspace selected based on your current profile (see https://docs.databricks.com/dev-tools/cli/profiles.html for how to change this).\nworkspace_host: {{workspace_host}}", "properties": { "project_name": { "type": "string", diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl index 76ecadd3e..870fe9c0b 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl @@ -1,14 +1,22 @@ -- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml) -{{- /* We can't use a materialized view here since they don't support 'create or refresh yet.*/}} +{{- /* We can't use a materialized view here since they don't support 'create or refresh' yet.*/}} + +USE CATALOG {{"{{"}}catalog{{"}}"}}; +USE {{"{{"}}schema{{"}}"}}; CREATE OR REPLACE VIEW - IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_daily')) + orders_daily AS SELECT order_date, count(*) AS number_of_orders FROM - IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_raw')) + orders_raw --- During development, only process a smaller range of data -WHERE {{"{{"}}bundle_target{{"}}"}} == "prod" OR (order_date >= '2019-08-01' AND order_date < '2019-09-01') +WHERE if( + {{"{{"}}bundle_target{{"}}"}} != "prod", + true, + + -- During development, only process a smaller range of data + order_date >= '2019-08-01' AND order_date < '2019-09-01' +) GROUP BY order_date diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl index 96769062b..d5891895a 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl +++ 
b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl @@ -3,8 +3,11 @@ -- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/ -- See also https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-streaming-table.html +USE CATALOG {{"{{"}}catalog{{"}}"}}; +USE {{"{{"}}schema{{"}}"}}; + CREATE OR REFRESH STREAMING TABLE - IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_raw')) + orders_raw AS SELECT customer_name, DATE(TIMESTAMP(FROM_UNIXTIME(TRY_CAST(order_datetime AS BIGINT)))) AS order_date, From 448d41027d7771a3d336db054ee372a81bc11cdc Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 4 Jun 2024 11:53:14 +0200 Subject: [PATCH 211/286] Fix listing notebooks in a subdirectory (#1468) ## Changes This worked fine if the notebooks are located in the filer's root and didn't if they are nested in a directory. This change adds test coverage and fixes the underlying issue. ## Tests Ran integration test manually. --- internal/filer_test.go | 24 ++++++++++++++++++- .../workspace_files_extensions_client.go | 2 +- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/internal/filer_test.go b/internal/filer_test.go index 3361de5bc..275304256 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -511,6 +511,7 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { content string }{ {"dir1/dir2/dir3/file.txt", "file content"}, + {"dir1/notebook.py", "# Databricks notebook source\nprint('first upload'))"}, {"foo.py", "print('foo')"}, {"foo.r", "print('foo')"}, {"foo.scala", "println('foo')"}, @@ -523,6 +524,16 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { {"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""}, } + // Assert that every file has a unique basename + basenames := map[string]struct{}{} + for _, f := range files { + basename := path.Base(f.name) + if _, ok := basenames[basename]; ok { + t.Fatalf("basename %s is not unique", basename) + } + basenames[basename] = struct{}{} + } + ctx := context.Background() wf, _ := setupWsfsExtensionsFiler(t) @@ -534,7 +545,6 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { // Read entries entries, err := wf.ReadDir(ctx, ".") require.NoError(t, err) - assert.Len(t, entries, len(files)) names := []string{} for _, e := range entries { names = append(names, e.Name()) @@ -552,6 +562,18 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { "scalaNb.scala", "sqlNb.sql", }, names) + + // Read entries in subdirectory + entries, err = wf.ReadDir(ctx, "dir1") + require.NoError(t, err) + names = []string{} + for _, e := range entries { + names = append(names, e.Name()) + } + assert.Equal(t, []string{ + "dir2", + "notebook.py", + }, names) } func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index bad748b10..3ce6913af 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -235,7 +235,7 @@ func (w *workspaceFilesExtensionsClient) ReadDir(ctx context.Context, name strin // If the object is a notebook, include an extension in the entry. 
if sysInfo.ObjectType == workspace.ObjectTypeNotebook { - stat, err := w.getNotebookStatByNameWithoutExt(ctx, entries[i].Name()) + stat, err := w.getNotebookStatByNameWithoutExt(ctx, path.Join(name, entries[i].Name())) if err != nil { return nil, err } From f8b2cb89d5134809a6902da16a3eee81d074603d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 4 Jun 2024 13:03:19 +0200 Subject: [PATCH 212/286] Release v0.221.0 (#1470) CLI: * Update OpenAPI spec ([#1466](https://github.com/databricks/cli/pull/1466)). Bundles: * Upgrade TF provider to 1.46.0 ([#1460](https://github.com/databricks/cli/pull/1460)). * Add support for Lakehouse monitoring ([#1307](https://github.com/databricks/cli/pull/1307)). * Make dbt-sql and default-sql templates public ([#1463](https://github.com/databricks/cli/pull/1463)). Internal: * Abstract over filesystem interaction with libs/vfs ([#1452](https://github.com/databricks/cli/pull/1452)). * Add `filer.Filer` to read notebooks from WSFS without omitting their extension ([#1457](https://github.com/databricks/cli/pull/1457)). * Fix listing notebooks in a subdirectory ([#1468](https://github.com/databricks/cli/pull/1468)). API Changes: * Changed `databricks account storage-credentials list` command to return . * Added `databricks consumer-listings batch-get` command. * Added `databricks consumer-providers batch-get` command. * Removed `databricks apps create-deployment` command. * Added `databricks apps deploy` command. OpenAPI commit 37b925eba37dfb3d7e05b6ba2d458454ce62d3a0 (2024-06-03) Dependency updates: * Bump github.com/hashicorp/go-version from 1.6.0 to 1.7.0 ([#1454](https://github.com/databricks/cli/pull/1454)). * Bump github.com/hashicorp/hc-install from 0.6.4 to 0.7.0 ([#1453](https://github.com/databricks/cli/pull/1453)). --- CHANGELOG.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fb35d479..568c616b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,33 @@ # Version changelog +## 0.221.0 + +CLI: + * Update OpenAPI spec ([#1466](https://github.com/databricks/cli/pull/1466)). + +Bundles: + * Upgrade TF provider to 1.46.0 ([#1460](https://github.com/databricks/cli/pull/1460)). + * Add support for Lakehouse monitoring ([#1307](https://github.com/databricks/cli/pull/1307)). + * Make dbt-sql and default-sql templates public ([#1463](https://github.com/databricks/cli/pull/1463)). + +Internal: + * Abstract over filesystem interaction with libs/vfs ([#1452](https://github.com/databricks/cli/pull/1452)). + * Add `filer.Filer` to read notebooks from WSFS without omitting their extension ([#1457](https://github.com/databricks/cli/pull/1457)). + * Fix listing notebooks in a subdirectory ([#1468](https://github.com/databricks/cli/pull/1468)). + +API Changes: + * Changed `databricks account storage-credentials list` command to return . + * Added `databricks consumer-listings batch-get` command. + * Added `databricks consumer-providers batch-get` command. + * Removed `databricks apps create-deployment` command. + * Added `databricks apps deploy` command. + +OpenAPI commit 37b925eba37dfb3d7e05b6ba2d458454ce62d3a0 (2024-06-03) + +Dependency updates: + * Bump github.com/hashicorp/go-version from 1.6.0 to 1.7.0 ([#1454](https://github.com/databricks/cli/pull/1454)). + * Bump github.com/hashicorp/hc-install from 0.6.4 to 0.7.0 ([#1453](https://github.com/databricks/cli/pull/1453)). 
+ ## 0.220.0 CLI: From 4bc0ea0af35f4b29bbd66cc278a64f29c4fecb12 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Tue, 4 Jun 2024 17:40:40 +0200 Subject: [PATCH 213/286] Fix SQL schema selection in default-sql template (#1471) ## Changes This fixes a last-minute regression that snuck into https://github.com/databricks/cli/pull/1463: unfortunately we need to use `USE IDENTIFIER('schema')` to select a schema for now. In the future we expect we can just use `USE SCHEMA 'schema'`. --- .../template/{{.project_name}}/src/orders_daily.sql.tmpl | 2 +- .../template/{{.project_name}}/src/orders_raw.sql.tmpl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl index 870fe9c0b..7c86f9212 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl @@ -2,7 +2,7 @@ {{- /* We can't use a materialized view here since they don't support 'create or refresh' yet.*/}} USE CATALOG {{"{{"}}catalog{{"}}"}}; -USE {{"{{"}}schema{{"}}"}}; +USE IDENTIFIER({{"{{"}}schema{{"}}"}}); CREATE OR REPLACE VIEW orders_daily diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl index d5891895a..c73606ef1 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl @@ -4,7 +4,7 @@ -- See also https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-streaming-table.html USE CATALOG {{"{{"}}catalog{{"}}"}}; -USE {{"{{"}}schema{{"}}"}}; +USE IDENTIFIER({{"{{"}}schema{{"}}"}}); CREATE OR REFRESH STREAMING TABLE orders_raw From 41678fa695ff197b73053d4cf8e26613da78ef6f Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 5 Jun 2024 13:13:32 +0200 Subject: [PATCH 214/286] Copy-editing for SQL templates (#1474) ## Changes This applies changes suggested by @juliacrawf-db --- .../templates/dbt-sql/databricks_template_schema.json | 4 ++-- .../templates/default-sql/databricks_template_schema.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/template/templates/dbt-sql/databricks_template_schema.json b/libs/template/templates/dbt-sql/databricks_template_schema.json index 7fc353521..cccf145dc 100644 --- a/libs/template/templates/dbt-sql/databricks_template_schema.json +++ b/libs/template/templates/dbt-sql/databricks_template_schema.json @@ -1,5 +1,5 @@ { - "welcome_message": "\nWelcome to the dbt template for Databricks Asset Bundles!\n\nWorkspace selected based on your current profile (see https://docs.databricks.com/dev-tools/cli/profiles.html for how to change this).\nworkspace_host: {{workspace_host}}", + "welcome_message": "\nWelcome to the dbt template for Databricks Asset Bundles!\n\nA workspace was selected based on your current profile. 
For information about how to change this, see https://docs.databricks.com/dev-tools/cli/profiles.html.\nworkspace_host: {{workspace_host}}", "properties": { "project_name": { "type": "string", @@ -13,7 +13,7 @@ "type": "string", "pattern": "^/sql/.\\../warehouses/[a-z0-9]+$", "pattern_match_failure_message": "Path must be of the form /sql/1.0/warehouses/", - "description": " \nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", + "description": "\nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", "order": 2 }, "default_catalog": { diff --git a/libs/template/templates/default-sql/databricks_template_schema.json b/libs/template/templates/default-sql/databricks_template_schema.json index aacd6a0af..329f91962 100644 --- a/libs/template/templates/default-sql/databricks_template_schema.json +++ b/libs/template/templates/default-sql/databricks_template_schema.json @@ -1,5 +1,5 @@ { - "welcome_message": "\nWelcome to the default SQL template for Databricks Asset Bundles!\n\nWorkspace selected based on your current profile (see https://docs.databricks.com/dev-tools/cli/profiles.html for how to change this).\nworkspace_host: {{workspace_host}}", + "welcome_message": "\nWelcome to the default SQL template for Databricks Asset Bundles!\n\nA workspace was selected based on your current profile. For information about how to change this, see https://docs.databricks.com/dev-tools/cli/profiles.html.\nworkspace_host: {{workspace_host}}", "properties": { "project_name": { "type": "string", From 311dfa46423089bea9079dabff951c9a10911750 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 5 Jun 2024 13:33:43 +0200 Subject: [PATCH 215/286] Upgrade TF provider to 1.47.0 (#1476) ## Changes This includes a bugfix for provisioning jobs with `num_workers = 0`. Fixes #1472. ## Tests Manually tested this fixes the issue. --- bundle/internal/tf/codegen/schema/version.go | 2 +- ...ce_aws_unity_catalog_assume_role_policy.go | 12 ++ bundle/internal/tf/schema/data_sources.go | 194 +++++++++--------- .../tf/schema/resource_mws_workspaces.go | 1 + .../tf/schema/resource_storage_credential.go | 1 + .../tf/schema/resource_system_schema.go | 1 + bundle/internal/tf/schema/root.go | 2 +- 7 files changed, 115 insertions(+), 98 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index f55b6c4f0..9595433a8 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.46.0" +const ProviderVersion = "1.47.0" diff --git a/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go b/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go new file mode 100644 index 000000000..14d5c169d --- /dev/null +++ b/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go @@ -0,0 +1,12 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceAwsUnityCatalogAssumeRolePolicy struct { + AwsAccountId string `json:"aws_account_id"` + ExternalId string `json:"external_id"` + Id string `json:"id,omitempty"` + Json string `json:"json,omitempty"` + RoleName string `json:"role_name"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index c32483db0..b68df2b40 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -3,105 +3,107 @@ package schema type DataSources struct { - AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` - AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` - AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` - AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"` - Catalog map[string]any `json:"databricks_catalog,omitempty"` - Catalogs map[string]any `json:"databricks_catalogs,omitempty"` - Cluster map[string]any `json:"databricks_cluster,omitempty"` - ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` - Clusters map[string]any `json:"databricks_clusters,omitempty"` - CurrentConfig map[string]any `json:"databricks_current_config,omitempty"` - CurrentMetastore map[string]any `json:"databricks_current_metastore,omitempty"` - CurrentUser map[string]any `json:"databricks_current_user,omitempty"` - DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` - DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"` - Directory map[string]any `json:"databricks_directory,omitempty"` - ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` - ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"` - Group map[string]any `json:"databricks_group,omitempty"` - InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` - InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` - Job map[string]any `json:"databricks_job,omitempty"` - Jobs map[string]any `json:"databricks_jobs,omitempty"` - Metastore map[string]any `json:"databricks_metastore,omitempty"` - Metastores map[string]any `json:"databricks_metastores,omitempty"` - MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` - MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` - MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` - MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` - NodeType map[string]any `json:"databricks_node_type,omitempty"` - Notebook map[string]any `json:"databricks_notebook,omitempty"` - NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"` - Pipelines map[string]any `json:"databricks_pipelines,omitempty"` - Schemas map[string]any `json:"databricks_schemas,omitempty"` - ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` - ServicePrincipals map[string]any `json:"databricks_service_principals,omitempty"` - Share map[string]any `json:"databricks_share,omitempty"` - Shares map[string]any `json:"databricks_shares,omitempty"` - SparkVersion map[string]any `json:"databricks_spark_version,omitempty"` - SqlWarehouse map[string]any `json:"databricks_sql_warehouse,omitempty"` - SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"` - 
StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` - StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"` - Table map[string]any `json:"databricks_table,omitempty"` - Tables map[string]any `json:"databricks_tables,omitempty"` - User map[string]any `json:"databricks_user,omitempty"` - Views map[string]any `json:"databricks_views,omitempty"` - Volumes map[string]any `json:"databricks_volumes,omitempty"` - Zones map[string]any `json:"databricks_zones,omitempty"` + AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` + AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` + AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` + AwsUnityCatalogAssumeRolePolicy map[string]any `json:"databricks_aws_unity_catalog_assume_role_policy,omitempty"` + AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"` + Catalog map[string]any `json:"databricks_catalog,omitempty"` + Catalogs map[string]any `json:"databricks_catalogs,omitempty"` + Cluster map[string]any `json:"databricks_cluster,omitempty"` + ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` + Clusters map[string]any `json:"databricks_clusters,omitempty"` + CurrentConfig map[string]any `json:"databricks_current_config,omitempty"` + CurrentMetastore map[string]any `json:"databricks_current_metastore,omitempty"` + CurrentUser map[string]any `json:"databricks_current_user,omitempty"` + DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` + DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"` + Directory map[string]any `json:"databricks_directory,omitempty"` + ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"` + Group map[string]any `json:"databricks_group,omitempty"` + InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` + InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` + Job map[string]any `json:"databricks_job,omitempty"` + Jobs map[string]any `json:"databricks_jobs,omitempty"` + Metastore map[string]any `json:"databricks_metastore,omitempty"` + Metastores map[string]any `json:"databricks_metastores,omitempty"` + MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` + MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` + MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` + NodeType map[string]any `json:"databricks_node_type,omitempty"` + Notebook map[string]any `json:"databricks_notebook,omitempty"` + NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"` + Pipelines map[string]any `json:"databricks_pipelines,omitempty"` + Schemas map[string]any `json:"databricks_schemas,omitempty"` + ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` + ServicePrincipals map[string]any `json:"databricks_service_principals,omitempty"` + Share map[string]any `json:"databricks_share,omitempty"` + Shares map[string]any `json:"databricks_shares,omitempty"` + SparkVersion map[string]any `json:"databricks_spark_version,omitempty"` + SqlWarehouse map[string]any `json:"databricks_sql_warehouse,omitempty"` + SqlWarehouses map[string]any 
`json:"databricks_sql_warehouses,omitempty"` + StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` + StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"` + Table map[string]any `json:"databricks_table,omitempty"` + Tables map[string]any `json:"databricks_tables,omitempty"` + User map[string]any `json:"databricks_user,omitempty"` + Views map[string]any `json:"databricks_views,omitempty"` + Volumes map[string]any `json:"databricks_volumes,omitempty"` + Zones map[string]any `json:"databricks_zones,omitempty"` } func NewDataSources() *DataSources { return &DataSources{ - AwsAssumeRolePolicy: make(map[string]any), - AwsBucketPolicy: make(map[string]any), - AwsCrossaccountPolicy: make(map[string]any), - AwsUnityCatalogPolicy: make(map[string]any), - Catalog: make(map[string]any), - Catalogs: make(map[string]any), - Cluster: make(map[string]any), - ClusterPolicy: make(map[string]any), - Clusters: make(map[string]any), - CurrentConfig: make(map[string]any), - CurrentMetastore: make(map[string]any), - CurrentUser: make(map[string]any), - DbfsFile: make(map[string]any), - DbfsFilePaths: make(map[string]any), - Directory: make(map[string]any), - ExternalLocation: make(map[string]any), - ExternalLocations: make(map[string]any), - Group: make(map[string]any), - InstancePool: make(map[string]any), - InstanceProfiles: make(map[string]any), - Job: make(map[string]any), - Jobs: make(map[string]any), - Metastore: make(map[string]any), - Metastores: make(map[string]any), - MlflowExperiment: make(map[string]any), - MlflowModel: make(map[string]any), - MwsCredentials: make(map[string]any), - MwsWorkspaces: make(map[string]any), - NodeType: make(map[string]any), - Notebook: make(map[string]any), - NotebookPaths: make(map[string]any), - Pipelines: make(map[string]any), - Schemas: make(map[string]any), - ServicePrincipal: make(map[string]any), - ServicePrincipals: make(map[string]any), - Share: make(map[string]any), - Shares: make(map[string]any), - SparkVersion: make(map[string]any), - SqlWarehouse: make(map[string]any), - SqlWarehouses: make(map[string]any), - StorageCredential: make(map[string]any), - StorageCredentials: make(map[string]any), - Table: make(map[string]any), - Tables: make(map[string]any), - User: make(map[string]any), - Views: make(map[string]any), - Volumes: make(map[string]any), - Zones: make(map[string]any), + AwsAssumeRolePolicy: make(map[string]any), + AwsBucketPolicy: make(map[string]any), + AwsCrossaccountPolicy: make(map[string]any), + AwsUnityCatalogAssumeRolePolicy: make(map[string]any), + AwsUnityCatalogPolicy: make(map[string]any), + Catalog: make(map[string]any), + Catalogs: make(map[string]any), + Cluster: make(map[string]any), + ClusterPolicy: make(map[string]any), + Clusters: make(map[string]any), + CurrentConfig: make(map[string]any), + CurrentMetastore: make(map[string]any), + CurrentUser: make(map[string]any), + DbfsFile: make(map[string]any), + DbfsFilePaths: make(map[string]any), + Directory: make(map[string]any), + ExternalLocation: make(map[string]any), + ExternalLocations: make(map[string]any), + Group: make(map[string]any), + InstancePool: make(map[string]any), + InstanceProfiles: make(map[string]any), + Job: make(map[string]any), + Jobs: make(map[string]any), + Metastore: make(map[string]any), + Metastores: make(map[string]any), + MlflowExperiment: make(map[string]any), + MlflowModel: make(map[string]any), + MwsCredentials: make(map[string]any), + MwsWorkspaces: make(map[string]any), + NodeType: 
make(map[string]any), + Notebook: make(map[string]any), + NotebookPaths: make(map[string]any), + Pipelines: make(map[string]any), + Schemas: make(map[string]any), + ServicePrincipal: make(map[string]any), + ServicePrincipals: make(map[string]any), + Share: make(map[string]any), + Shares: make(map[string]any), + SparkVersion: make(map[string]any), + SqlWarehouse: make(map[string]any), + SqlWarehouses: make(map[string]any), + StorageCredential: make(map[string]any), + StorageCredentials: make(map[string]any), + Table: make(map[string]any), + Tables: make(map[string]any), + User: make(map[string]any), + Views: make(map[string]any), + Volumes: make(map[string]any), + Zones: make(map[string]any), } } diff --git a/bundle/internal/tf/schema/resource_mws_workspaces.go b/bundle/internal/tf/schema/resource_mws_workspaces.go index 21d1ce428..6c053cb84 100644 --- a/bundle/internal/tf/schema/resource_mws_workspaces.go +++ b/bundle/internal/tf/schema/resource_mws_workspaces.go @@ -43,6 +43,7 @@ type ResourceMwsWorkspaces struct { CustomTags map[string]string `json:"custom_tags,omitempty"` CustomerManagedKeyId string `json:"customer_managed_key_id,omitempty"` DeploymentName string `json:"deployment_name,omitempty"` + GcpWorkspaceSa string `json:"gcp_workspace_sa,omitempty"` Id string `json:"id,omitempty"` IsNoPublicIpEnabled bool `json:"is_no_public_ip_enabled,omitempty"` Location string `json:"location,omitempty"` diff --git a/bundle/internal/tf/schema/resource_storage_credential.go b/bundle/internal/tf/schema/resource_storage_credential.go index 3d4a501ea..b565a5c78 100644 --- a/bundle/internal/tf/schema/resource_storage_credential.go +++ b/bundle/internal/tf/schema/resource_storage_credential.go @@ -41,6 +41,7 @@ type ResourceStorageCredential struct { Owner string `json:"owner,omitempty"` ReadOnly bool `json:"read_only,omitempty"` SkipValidation bool `json:"skip_validation,omitempty"` + StorageCredentialId string `json:"storage_credential_id,omitempty"` AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"` AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"` AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"` diff --git a/bundle/internal/tf/schema/resource_system_schema.go b/bundle/internal/tf/schema/resource_system_schema.go index 09a86103a..fe5b128d6 100644 --- a/bundle/internal/tf/schema/resource_system_schema.go +++ b/bundle/internal/tf/schema/resource_system_schema.go @@ -3,6 +3,7 @@ package schema type ResourceSystemSchema struct { + FullName string `json:"full_name,omitempty"` Id string `json:"id,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Schema string `json:"schema,omitempty"` diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index e4ca67740..53f892030 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.46.0" +const ProviderVersion = "1.47.0" func NewRoot() *Root { return &Root{ From 1451361c9f89ede1d7f8ed15e7ae592ab45f47e0 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 5 Jun 2024 13:33:51 +0200 Subject: [PATCH 216/286] Use latest version of goreleaser action (#1477) ## Changes Same as https://github.com/databricks/terraform-provider-databricks/pull/3645. 
## Tests n/a --- .github/workflows/release.yml | 4 ++-- .goreleaser.yaml | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8643ac355..bde5b377b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -39,9 +39,9 @@ jobs: - name: Run GoReleaser id: releaser - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 with: - version: latest + version: ~> v2 args: release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.goreleaser.yaml b/.goreleaser.yaml index d37876edb..3f0bdb2c5 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,6 +1,9 @@ +version: 2 + before: hooks: - - go mod tidy + - go mod download + builds: - env: - CGO_ENABLED=0 @@ -36,6 +39,7 @@ builds: - amd64 - arm64 binary: databricks + archives: - format: zip @@ -89,8 +93,10 @@ docker_manifests: checksum: name_template: 'databricks_cli_{{ .Version }}_SHA256SUMS' algorithm: sha256 + snapshot: name_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}' + changelog: sort: asc filters: From 8c9fff3cb977329dc149416fdfbe7563f60bd3f8 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 5 Jun 2024 14:29:20 +0200 Subject: [PATCH 217/286] Release v0.221.1 (#1478) Bundles: This releases fixes an issue introduced in v0.221.0 where managing jobs with a single-node cluster would fail. * Fix SQL schema selection in default-sql template ([#1471](https://github.com/databricks/cli/pull/1471)). * Copy-editing for SQL templates ([#1474](https://github.com/databricks/cli/pull/1474)). * Upgrade TF provider to 1.47.0 ([#1476](https://github.com/databricks/cli/pull/1476)). Internal: * Use latest version of goreleaser action ([#1477](https://github.com/databricks/cli/pull/1477)). --- CHANGELOG.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 568c616b3..8f6f47dc6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Version changelog +## 0.221.1 + +Bundles: + +This releases fixes an issue introduced in v0.221.0 where managing jobs with a single-node cluster would fail. + + * Fix SQL schema selection in default-sql template ([#1471](https://github.com/databricks/cli/pull/1471)). + * Copy-editing for SQL templates ([#1474](https://github.com/databricks/cli/pull/1474)). + * Upgrade TF provider to 1.47.0 ([#1476](https://github.com/databricks/cli/pull/1476)). + +Internal: + * Use latest version of goreleaser action ([#1477](https://github.com/databricks/cli/pull/1477)). + + + ## 0.221.0 CLI: From 35186d5ddbfd47e81773ef95853f741e73763ff4 Mon Sep 17 00:00:00 2001 From: Arpit Jasapara <87999496+arpitjasa-db@users.noreply.github.com> Date: Thu, 6 Jun 2024 00:11:23 -0700 Subject: [PATCH 218/286] Add randIntn function (#1475) ## Changes Add support for `math/rand.Intn` to DAB templates. ## Tests Unit tests. 
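For illustration only (not part of the patch): a self-contained Go sketch of what the new helper provides. It registers `math/rand.Intn` under the name `random_int` in a `text/template` FuncMap, mirroring the change to `libs/template/helpers.go` shown below; the template text used here is a made-up example.

```go
package main

import (
	"math/rand"
	"os"
	"text/template"
)

func main() {
	// Expose rand.Intn to templates as "random_int", as the patch does for
	// Databricks Asset Bundle templates.
	funcs := template.FuncMap{
		"random_int": func(n int) int { return rand.Intn(n) },
	}

	// A bundle template could then render a pseudo-random value in [0, 100),
	// e.g. to generate a unique-ish suffix when the template is materialized.
	tmpl := template.Must(template.New("demo").Funcs(funcs).Parse("suffix: {{random_int 100}}\n"))
	_ = tmpl.Execute(os.Stdout, nil)
}
```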
--- libs/template/helpers.go | 5 +++++ libs/template/helpers_test.go | 19 +++++++++++++++++++ .../testdata/random-int/template/hello.tmpl | 1 + 3 files changed, 25 insertions(+) create mode 100644 libs/template/testdata/random-int/template/hello.tmpl diff --git a/libs/template/helpers.go b/libs/template/helpers.go index d15a801d6..b3dea329e 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "math/rand" "net/url" "os" "regexp" @@ -46,6 +47,10 @@ func loadHelpers(ctx context.Context) template.FuncMap { "regexp": func(expr string) (*regexp.Regexp, error) { return regexp.Compile(expr) }, + // Alias for https://pkg.go.dev/math/rand#Intn. Returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). + "random_int": func(n int) int { + return rand.Intn(n) + }, // A key value pair. This is used with the map function to generate maps // to use inside a template "pair": func(k string, v any) pair { diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index a07b26f81..c0848c8d0 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -3,6 +3,7 @@ package template import ( "context" "os" + "strconv" "strings" "testing" @@ -50,6 +51,24 @@ func TestTemplateRegexpCompileFunction(t *testing.T) { assert.Contains(t, content, "1:fool") } +func TestTemplateRandIntFunction(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/random-int/template", "./testdata/random-int/library", tmpDir) + require.NoError(t, err) + + err = r.walk() + assert.NoError(t, err) + + assert.Len(t, r.files, 1) + randInt, err := strconv.Atoi(strings.TrimSpace(string(r.files[0].(*inMemoryFile).content))) + assert.Less(t, randInt, 10) + assert.Empty(t, err) +} + func TestTemplateUrlFunction(t *testing.T) { ctx := context.Background() tmpDir := t.TempDir() diff --git a/libs/template/testdata/random-int/template/hello.tmpl b/libs/template/testdata/random-int/template/hello.tmpl new file mode 100644 index 000000000..46dc63fb6 --- /dev/null +++ b/libs/template/testdata/random-int/template/hello.tmpl @@ -0,0 +1 @@ +{{print (random_int 10)}} From 99c7d136d6cf20e35d8ca7499cffea4c59259520 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Thu, 6 Jun 2024 09:40:15 +0200 Subject: [PATCH 219/286] Fix conditional in query in `default-sql` template (#1479) ## Changes This corrects a mistake in the sample SQL identified by @pietern --- .../template/{{.project_name}}/src/orders_daily.sql.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl index 7c86f9212..8a9d12ea8 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl @@ -12,7 +12,7 @@ FROM orders_raw WHERE if( - {{"{{"}}bundle_target{{"}}"}} != "prod", + {{"{{"}}bundle_target{{"}}"}} = "prod", true, -- During development, only process a smaller range of data From b92e072addfa9ec057fb41f9b907af9b6b836b10 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 13:47:22 +0200 Subject: [PATCH 220/286] Bump 
golang.org/x/text from 0.15.0 to 0.16.0 (#1482) Bumps [golang.org/x/text](https://github.com/golang/text) from 0.15.0 to 0.16.0.

Commits:
- 9c2f3a2 cmd/gotext: fix segfault in extract & rewrite commands
- 59e1219 message: optimize lookupAndFormat function for better performance
- a20a3e2 x/text: update x/tools for go/ssa range-over-func fix
- See the full diff in the compare view on GitHub

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/text&package-manager=go_modules&previous-version=0.15.0&new-version=0.16.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index fadeabd6f..f8eb6dc93 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/oauth2 v0.20.0 golang.org/x/sync v0.7.0 golang.org/x/term v0.20.0 - golang.org/x/text v0.15.0 + golang.org/x/text v0.16.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index 7e5dd4bbe..7746f2232 100644 --- a/go.sum +++ b/go.sum @@ -214,8 +214,8 @@ golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -223,8 +223,8 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= From 3d3ab50ff935c9aa496182db030f0dec41def83b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 12:02:59 +0000 Subject: [PATCH 221/286] Bump golang.org/x/term from 0.20.0 to 0.21.0 (#1483) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.20.0 to 0.21.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.20.0&new-version=0.21.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index f8eb6dc93..79fc49fba 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/mod v0.17.0 golang.org/x/oauth2 v0.20.0 golang.org/x/sync v0.7.0 - golang.org/x/term v0.20.0 + golang.org/x/term v0.21.0 golang.org/x/text v0.16.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 @@ -61,7 +61,7 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.23.0 // indirect golang.org/x/net v0.25.0 // indirect - golang.org/x/sys v0.20.0 // indirect + golang.org/x/sys v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect diff --git a/go.sum b/go.sum index 7746f2232..49d4f693b 100644 --- a/go.sum +++ b/go.sum @@ -208,10 +208,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= From 645e9ba8c4b07cd5732dbb18de74d370601715a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 14:03:29 +0200 Subject: [PATCH 222/286] Bump golang.org/x/mod from 0.17.0 to 0.18.0 (#1484) Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.17.0 to 0.18.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/mod&package-manager=go_modules&previous-version=0.17.0&new-version=0.18.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 79fc49fba..717f17ebf 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/mod v0.17.0 + golang.org/x/mod v0.18.0 golang.org/x/oauth2 v0.20.0 golang.org/x/sync v0.7.0 golang.org/x/term v0.21.0 diff --git a/go.sum b/go.sum index 49d4f693b..dbadfe347 100644 --- a/go.sum +++ b/go.sum @@ -180,8 +180,8 @@ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From a5e89fd3827f3f52379ea7a8d281fdee8063c932 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 12:19:21 +0000 Subject: [PATCH 223/286] Bump golang.org/x/oauth2 from 0.20.0 to 0.21.0 (#1485) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.20.0 to 0.21.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/oauth2&package-manager=go_modules&previous-version=0.20.0&new-version=0.21.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 717f17ebf..727ac702f 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.18.0 - golang.org/x/oauth2 v0.20.0 + golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 golang.org/x/term v0.21.0 golang.org/x/text v0.16.0 diff --git a/go.sum b/go.sum index dbadfe347..38db2aa69 100644 --- a/go.sum +++ b/go.sum @@ -191,8 +191,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 44e3928d6ad8f85f04041f8dd4cde25f32bfbe19 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 17 Jun 2024 15:18:52 +0530 Subject: [PATCH 224/286] Avoid multiple file tree traversals on bundle deploy (#1493) ## Changes To run bundle deploy from DBR we use an abstraction over the workspace import / export APIs to create a `filer.Filer` and abstract the file system. Walking the file tree in such a filer is expensive and requires multiple API calls. This PR remove the two duplicate file tree walks that happen by caching the result. --- bundle/bundle.go | 4 ++ bundle/deploy/files/upload.go | 2 +- bundle/deploy/state_update.go | 16 +---- bundle/deploy/state_update_test.go | 101 +++++++++++++---------------- cmd/bundle/sync.go | 3 +- cmd/sync/sync.go | 2 +- libs/sync/sync.go | 21 +++--- 7 files changed, 68 insertions(+), 81 deletions(-) diff --git a/bundle/bundle.go b/bundle/bundle.go index 1dc98656a..482614b9a 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -16,6 +16,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/bundle/metadata" + "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" @@ -50,6 +51,9 @@ type Bundle struct { clientOnce sync.Once client *databricks.WorkspaceClient + // Files that are synced to the workspace.file_path + Files []fileset.File + // Stores an initialized copy of this bundle's Terraform wrapper. 
Terraform *tfexec.Terraform diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index fa20ed4ea..2c126623e 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -23,7 +23,7 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return diag.FromErr(err) } - err = sync.RunOnce(ctx) + b.Files, err = sync.RunOnce(ctx) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index 6903a9f87..bfdb308c4 100644 --- a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -11,7 +11,6 @@ import ( "time" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" @@ -40,19 +39,8 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost state.CliVersion = build.GetInfo().Version state.Version = DeploymentStateVersion - // Get the current file list. - sync, err := files.GetSync(ctx, bundle.ReadOnly(b)) - if err != nil { - return diag.FromErr(err) - } - - files, err := sync.GetFileList(ctx) - if err != nil { - return diag.FromErr(err) - } - - // Update the state with the current file list. - fl, err := FromSlice(files) + // Update the state with the current list of synced files. + fl, err := FromSlice(b.Files) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index dd8a1336e..ed72439d2 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -10,19 +10,23 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/internal/build" "github.com/databricks/cli/internal/testutil" - databrickscfg "github.com/databricks/databricks-sdk-go/config" - "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/iam" - "github.com/databricks/databricks-sdk-go/service/workspace" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -func TestStateUpdate(t *testing.T) { - s := &stateUpdate{} +func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle { + tmpDir := t.TempDir() - b := &bundle.Bundle{ - RootPath: t.TempDir(), + testutil.Touch(t, tmpDir, "test1.py") + testutil.TouchNotebook(t, tmpDir, "test2.py") + + files, err := fileset.New(vfs.MustNew(tmpDir)).All() + require.NoError(t, err) + + return &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ Bundle: config.Bundle{ Target: "default", @@ -37,22 +41,14 @@ func TestStateUpdate(t *testing.T) { }, }, }, + Files: files, } +} - testutil.Touch(t, b.RootPath, "test1.py") - testutil.Touch(t, b.RootPath, "test2.py") - - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &databrickscfg.Config{ - Host: "https://test.com", - } - b.SetWorkpaceClient(m.WorkspaceClient) - - wsApi := m.GetMockWorkspaceAPI() - wsApi.EXPECT().GetStatusByPath(mock.Anything, "/files").Return(&workspace.ObjectInfo{ - ObjectType: "DIRECTORY", - }, nil) +func TestStateUpdate(t *testing.T) { + s := &stateUpdate{} + b := setupBundleForStateUpdate(t) ctx := context.Background() diags := bundle.Apply(ctx, b, s) @@ -63,7 +59,15 @@ func TestStateUpdate(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(1), state.Seq) - require.Len(t, state.Files, 3) + require.Equal(t, 
state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) require.Equal(t, build.GetInfo().Version, state.CliVersion) diags = bundle.Apply(ctx, b, s) @@ -74,45 +78,22 @@ func TestStateUpdate(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), state.Seq) - require.Len(t, state.Files, 3) + require.Equal(t, state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) require.Equal(t, build.GetInfo().Version, state.CliVersion) } func TestStateUpdateWithExistingState(t *testing.T) { s := &stateUpdate{} - b := &bundle.Bundle{ - RootPath: t.TempDir(), - Config: config.Root{ - Bundle: config.Bundle{ - Target: "default", - }, - Workspace: config.Workspace{ - StatePath: "/state", - FilePath: "/files", - CurrentUser: &config.User{ - User: &iam.User{ - UserName: "test-user", - }, - }, - }, - }, - } - - testutil.Touch(t, b.RootPath, "test1.py") - testutil.Touch(t, b.RootPath, "test2.py") - - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &databrickscfg.Config{ - Host: "https://test.com", - } - b.SetWorkpaceClient(m.WorkspaceClient) - - wsApi := m.GetMockWorkspaceAPI() - wsApi.EXPECT().GetStatusByPath(mock.Anything, "/files").Return(&workspace.ObjectInfo{ - ObjectType: "DIRECTORY", - }, nil) - + b := setupBundleForStateUpdate(t) ctx := context.Background() // Create an existing state file. @@ -144,6 +125,14 @@ func TestStateUpdateWithExistingState(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(11), state.Seq) - require.Len(t, state.Files, 3) + require.Equal(t, state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) require.Equal(t, build.GetInfo().Version, state.CliVersion) } diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index 72ad8eb3a..df3e087c2 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -72,7 +72,8 @@ func newSyncCommand() *cobra.Command { return s.RunContinuous(ctx) } - return s.RunOnce(ctx) + _, err = s.RunOnce(ctx) + return err } return cmd diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index e5f1bfc9e..bab451593 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -135,7 +135,7 @@ func New() *cobra.Command { if f.watch { err = s.RunContinuous(ctx) } else { - err = s.RunOnce(ctx) + _, err = s.RunOnce(ctx) } s.Close() diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 585e8a887..12b1f1d05 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -152,36 +152,41 @@ func (s *Sync) notifyComplete(ctx context.Context, d diff) { s.seq++ } -func (s *Sync) RunOnce(ctx context.Context) error { +// Upload all files in the file tree rooted at the local path configured in the +// SyncOptions to the remote path configured in the SyncOptions. +// +// Returns the list of files tracked (and synchronized) by the syncer during the run, +// and an error if any occurred. 
+func (s *Sync) RunOnce(ctx context.Context) ([]fileset.File, error) { files, err := s.GetFileList(ctx) if err != nil { - return err + return files, err } change, err := s.snapshot.diff(ctx, files) if err != nil { - return err + return files, err } s.notifyStart(ctx, change) if change.IsEmpty() { s.notifyComplete(ctx, change) - return nil + return files, nil } err = s.applyDiff(ctx, change) if err != nil { - return err + return files, err } err = s.snapshot.Save(ctx) if err != nil { log.Errorf(ctx, "cannot store snapshot: %s", err) - return err + return files, err } s.notifyComplete(ctx, change) - return nil + return files, nil } func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { @@ -231,7 +236,7 @@ func (s *Sync) RunContinuous(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() case <-ticker.C: - err := s.RunOnce(ctx) + _, err := s.RunOnce(ctx) if err != nil { return err } From ac6b80ed88d57d19908c60271dafad6326d83479 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 17 Jun 2024 15:19:00 +0530 Subject: [PATCH 225/286] Remove user credentials specified in the Git origin URL (#1494) ## Changes We set the origin URL as metadata in any jobs created by DABs. This PR makes sure user credentials do not leak into the set metadata in the job. ## Tests Unit test --------- Co-authored-by: Pieter Noordhuis --- libs/git/repository.go | 18 +++++++++++++++++- libs/git/repository_test.go | 6 ++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/libs/git/repository.go b/libs/git/repository.go index 86d56a7fc..6940ddac8 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io/fs" + "net/url" "path" "path/filepath" "strings" @@ -100,7 +101,22 @@ func (r *Repository) LatestCommit() (string, error) { // return origin url if it's defined, otherwise an empty string func (r *Repository) OriginUrl() string { - return r.config.variables["remote.origin.url"] + rawUrl := r.config.variables["remote.origin.url"] + + // Remove username and password from the URL. + parsedUrl, err := url.Parse(rawUrl) + if err != nil { + // Git supports https URLs and non standard URLs like "ssh://" or "file://". + // Parsing these URLs is not supported by the Go standard library. In case + // of an error, we return the raw URL. This is okay because for ssh URLs + // because passwords cannot be included in the URL. + return rawUrl + } + // Setting User to nil removes the username and password from the URL when + // .String() is called. + // See: https://pkg.go.dev/net/url#URL.String + parsedUrl.User = nil + return parsedUrl.String() } // loadConfig loads and combines user specific and repository specific configuration files. 
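
As a minimal illustration of the approach taken in `OriginUrl` above (a hedged sketch only — the helper name and the example URL below are invented for illustration and are not part of this change):

```go
package main

import (
	"fmt"
	"net/url"
)

// stripCredentials removes any user info from a remote URL, falling back to
// the raw string when parsing fails (mirroring the behavior in the patch).
func stripCredentials(raw string) string {
	u, err := url.Parse(raw)
	if err != nil {
		// Non-standard remote strings may not parse cleanly; return them as-is.
		return raw
	}
	// Clearing User drops both the username and password when the URL is
	// serialized again via String().
	u.User = nil
	return u.String()
}

func main() {
	fmt.Println(stripCredentials("https://username:token@github.com/databricks/foobar.git"))
	// Prints: https://github.com/databricks/foobar.git
}
```
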
diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index 7ddc7ea79..a28038eeb 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -207,3 +207,9 @@ func TestRepositoryGitConfigWhenNotARepo(t *testing.T) { originUrl := repo.OriginUrl() assert.Equal(t, "", originUrl) } + +func TestRepositoryOriginUrlRemovesUserCreds(t *testing.T) { + repo := newTestRepository(t) + repo.addOriginUrl("https://username:token@github.com/databricks/foobar.git") + repo.assertOriginUrl("https://github.com/databricks/foobar.git") +} From 4904dfb047ff0920b55ce0a9b458a34ee02a14e5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 14:18:08 +0200 Subject: [PATCH 226/286] Bump github.com/briandowns/spinner from 1.23.0 to 1.23.1 (#1495) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/briandowns/spinner](https://github.com/briandowns/spinner) from 1.23.0 to 1.23.1.
Release notes

Sourced from github.com/briandowns/spinner's releases.

v1.23.1

What's Changed

New Contributors

Full Changelog: https://github.com/briandowns/spinner/compare/v1.23.0...v1.23.1

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/briandowns/spinner&package-manager=go_modules&previous-version=1.23.0&new-version=1.23.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 727ac702f..de18efc5b 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT - github.com/briandowns/spinner v1.23.0 // Apache 2.0 + github.com/briandowns/spinner v1.23.1 // Apache 2.0 github.com/databricks/databricks-sdk-go v0.42.0 // Apache 2.0 github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE diff --git a/go.sum b/go.sum index 38db2aa69..1e0df9328 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97 github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= -github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= +github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= +github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= From 8d658589ed0fbc34d0e5600b85f5a5c3d927a184 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 14:29:50 +0200 Subject: [PATCH 227/286] Bump github.com/spf13/cobra from 1.8.0 to 1.8.1 (#1496) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.0 to 1.8.1.
Release notes

Sourced from github.com/spf13/cobra's releases.

v1.8.1

✨ Features

🐛 Bug fixes

🔧 Maintenance

🧪 Testing & CI/CD

✏️ Documentation

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/spf13/cobra&package-manager=go_modules&previous-version=1.8.0&new-version=1.8.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index de18efc5b..bcfbae470 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/nwidger/jsoncolor v0.3.2 // MIT github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // BSD-2-Clause github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // MIT - github.com/spf13/cobra v1.8.0 // Apache 2.0 + github.com/spf13/cobra v1.8.1 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 diff --git a/go.sum b/go.sum index 1e0df9328..0f4f62d90 100644 --- a/go.sum +++ b/go.sum @@ -29,7 +29,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/databricks/databricks-sdk-go v0.42.0 h1:WKdoqnvb+jvsR9+IYkC3P4BH5eJHRzVOr59y3mCoY+s= @@ -139,8 +139,8 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= From 533d357a71b32a4d616d98dc4bef9d552baaf531 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 17 Jun 2024 17:56:49 +0200 Subject: [PATCH 228/286] Fix typo in DBT template (#1498) ## Changes Found in https://github.com/databricks/bundle-examples/pull/26. ## Tests n/a --- .../templates/dbt-sql/template/{{.project_name}}/README.md.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl index d46b61f72..dbf8a8d85 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl @@ -94,7 +94,7 @@ target-specific settings. 
Read more about dbt profiles on Databricks at https://docs.databricks.com/en/workflows/jobs/how-to/use-dbt-in-workflows.html#advanced-run-dbt-with-a-custom-profile. The target workspaces for staging and prod are defined in databricks.yml. -You can manaully deploy based on these configurations (see below). +You can manually deploy based on these configurations (see below). Or you can use CI/CD to automate deployment. See https://docs.databricks.com/dev-tools/bundles/ci-cd.html for documentation on CI/CD setup. From 274688d8a23aeb996a19fefd230cd51ca4be8146 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 18 Jun 2024 19:44:27 +0530 Subject: [PATCH 229/286] Clean up unused code (#1502) ## Changes 1. Removes `DefaultMutatorsForTarget` which is no longer used anywhere 2. Makes SnapshotPath a private field. It's no longer needed by data structures outside its package. FYI, I also tried finding other instances of dead code but I could not find anything else that was safe to remove. I used https://go.dev/blog/deadcode to search for them, and the other instances either implemented an interface, increased test coverage for some of our other code paths or there was some other reason I could not remove them (like autogenerated functions or used in tests). Good sign our codebase is mostly clean (at least superficially). --- bundle/config/mutator/mutator.go | 7 ------- libs/sync/snapshot.go | 14 +++++++------- libs/sync/sync.go | 4 ---- 3 files changed, 7 insertions(+), 18 deletions(-) diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index ae0d7e5fb..7d7711118 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -26,10 +26,3 @@ func DefaultMutators() []bundle.Mutator { LoadGitDetails(), } } - -func DefaultMutatorsForTarget(target string) []bundle.Mutator { - return append( - DefaultMutators(), - SelectTarget(target), - ) -} diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index b46bd19f4..f2920d8c2 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -35,7 +35,7 @@ const LatestSnapshotVersion = "v1" type Snapshot struct { // Path where this snapshot was loaded from and will be saved to. // Intentionally not part of the snapshot state because it may be moved by the user. - SnapshotPath string `json:"-"` + snapshotPath string // New indicates if this is a fresh snapshot or if it was loaded from disk. New bool `json:"-"` @@ -70,7 +70,7 @@ func NewSnapshot(localFiles []fileset.File, opts *SyncOptions) (*Snapshot, error snapshotState.ResetLastModifiedTimes() return &Snapshot{ - SnapshotPath: snapshotPath, + snapshotPath: snapshotPath, New: true, Version: LatestSnapshotVersion, Host: opts.Host, @@ -107,7 +107,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } return &Snapshot{ - SnapshotPath: path, + snapshotPath: path, New: true, Version: LatestSnapshotVersion, @@ -122,7 +122,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } func (s *Snapshot) Save(ctx context.Context) error { - f, err := os.OpenFile(s.SnapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return fmt.Errorf("failed to create/open persisted sync snapshot file: %s", err) } @@ -147,11 +147,11 @@ func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error } // Snapshot file not found. We return the new copy. 
- if _, err := os.Stat(snapshot.SnapshotPath); errors.Is(err, fs.ErrNotExist) { + if _, err := os.Stat(snapshot.snapshotPath); errors.Is(err, fs.ErrNotExist) { return snapshot, nil } - bytes, err := os.ReadFile(snapshot.SnapshotPath) + bytes, err := os.ReadFile(snapshot.snapshotPath) if err != nil { return nil, fmt.Errorf("failed to read sync snapshot from disk: %s", err) } @@ -191,7 +191,7 @@ func (s *Snapshot) diff(ctx context.Context, all []fileset.File) (diff, error) { currentState := s.SnapshotState if err := currentState.validate(); err != nil { - return diff{}, fmt.Errorf("error parsing existing sync state. Please delete your existing sync snapshot file (%s) and retry: %w", s.SnapshotPath, err) + return diff{}, fmt.Errorf("error parsing existing sync state. Please delete your existing sync snapshot file (%s) and retry: %w", s.snapshotPath, err) } // Compute diff to apply to get from current state to new target state. diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 12b1f1d05..3d5bc61ec 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -223,10 +223,6 @@ func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { return all.Iter(), nil } -func (s *Sync) SnapshotPath() string { - return s.snapshot.SnapshotPath -} - func (s *Sync) RunContinuous(ctx context.Context) error { ticker := time.NewTicker(s.PollInterval) defer ticker.Stop() From 553fdd1e818362b52ff1b9c3f35f82f4990b18d7 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 18 Jun 2024 20:34:20 +0530 Subject: [PATCH 230/286] Serialize dynamic value for `bundle validate` output (#1499) ## Changes Using dynamic values allows us to retain references like `${resources.jobs...}` even when the type of field is not integer, eg: `run_job_task`, or in general values that do not map to the Go types for a field. ## Tests Integration test --- bundle/config/root.go | 6 ++++ cmd/bundle/validate.go | 2 +- internal/bundle/helpers.go | 7 ++++ internal/bundle/validate_test.go | 60 ++++++++++++++++++++++++++++++++ internal/testutil/file.go | 48 +++++++++++++++++++++++++ internal/testutil/touch.go | 26 -------------- 6 files changed, 122 insertions(+), 27 deletions(-) create mode 100644 internal/bundle/validate_test.go create mode 100644 internal/testutil/file.go delete mode 100644 internal/testutil/touch.go diff --git a/bundle/config/root.go b/bundle/config/root.go index 88197c2b8..2bc905bd6 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -471,3 +471,9 @@ func (r Root) GetLocation(path string) dyn.Location { } return v.Location() } + +// Value returns the dynamic configuration value of the root object. This value +// is the source of truth and is kept in sync with values in the typed configuration. 
+func (r Root) Value() dyn.Value { + return r.value +} diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 8d49ec961..a1f8d2681 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -119,7 +119,7 @@ func renderTextOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnosti } func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { - buf, err := json.MarshalIndent(b.Config, "", " ") + buf, err := json.MarshalIndent(b.Config.Value().AsAny(), "", " ") if err != nil { return err } diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 560a0474b..a17964b16 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -51,6 +51,13 @@ func writeConfigFile(t *testing.T, config map[string]any) (string, error) { return filepath, err } +func validateBundle(t *testing.T, ctx context.Context, path string) ([]byte, error) { + t.Setenv("BUNDLE_ROOT", path) + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") + stdout, _, err := c.Run() + return stdout.Bytes(), err +} + func deployBundle(t *testing.T, ctx context.Context, path string) error { t.Setenv("BUNDLE_ROOT", path) c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") diff --git a/internal/bundle/validate_test.go b/internal/bundle/validate_test.go new file mode 100644 index 000000000..18da89e4c --- /dev/null +++ b/internal/bundle/validate_test.go @@ -0,0 +1,60 @@ +package bundle + +import ( + "context" + "encoding/json" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccBundleValidate(t *testing.T) { + testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") + + tmpDir := t.TempDir() + testutil.WriteFile(t, + ` +bundle: + name: "foobar" + +resources: + jobs: + outer_loop: + name: outer loop + tasks: + - task_key: my task + run_job_task: + job_id: ${resources.jobs.inner_loop.id} + + inner_loop: + name: inner loop + +`, tmpDir, "databricks.yml") + + ctx := context.Background() + stdout, err := validateBundle(t, ctx, tmpDir) + require.NoError(t, err) + + config := make(map[string]any) + err = json.Unmarshal(stdout, &config) + require.NoError(t, err) + + getValue := func(key string) any { + v, err := convert.FromTyped(config, dyn.NilValue) + require.NoError(t, err) + v, err = dyn.GetByPath(v, dyn.MustPathFromString(key)) + require.NoError(t, err) + return v.AsAny() + } + + assert.Equal(t, "foobar", getValue("bundle.name")) + assert.Equal(t, "outer loop", getValue("resources.jobs.outer_loop.name")) + assert.Equal(t, "inner loop", getValue("resources.jobs.inner_loop.name")) + assert.Equal(t, "my task", getValue("resources.jobs.outer_loop.tasks[0].task_key")) + // Assert resource references are retained in the output. + assert.Equal(t, "${resources.jobs.inner_loop.id}", getValue("resources.jobs.outer_loop.tasks[0].run_job_task.job_id")) +} diff --git a/internal/testutil/file.go b/internal/testutil/file.go new file mode 100644 index 000000000..ba2c3280e --- /dev/null +++ b/internal/testutil/file.go @@ -0,0 +1,48 @@ +package testutil + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TouchNotebook(t *testing.T, elems ...string) string { + path := filepath.Join(elems...) 
+ err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + err = os.WriteFile(path, []byte("# Databricks notebook source"), 0644) + require.NoError(t, err) + return path +} + +func Touch(t *testing.T, elems ...string) string { + path := filepath.Join(elems...) + err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + f, err := os.Create(path) + require.NoError(t, err) + + err = f.Close() + require.NoError(t, err) + return path +} + +func WriteFile(t *testing.T, content string, elems ...string) string { + path := filepath.Join(elems...) + err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + f, err := os.Create(path) + require.NoError(t, err) + + _, err = f.WriteString(content) + require.NoError(t, err) + + err = f.Close() + require.NoError(t, err) + return path +} diff --git a/internal/testutil/touch.go b/internal/testutil/touch.go deleted file mode 100644 index 55683f3ed..000000000 --- a/internal/testutil/touch.go +++ /dev/null @@ -1,26 +0,0 @@ -package testutil - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" -) - -func TouchNotebook(t *testing.T, elems ...string) string { - path := filepath.Join(elems...) - os.MkdirAll(filepath.Dir(path), 0755) - err := os.WriteFile(path, []byte("# Databricks notebook source"), 0644) - require.NoError(t, err) - return path -} - -func Touch(t *testing.T, elems ...string) string { - path := filepath.Join(elems...) - os.MkdirAll(filepath.Dir(path), 0755) - f, err := os.Create(path) - require.NoError(t, err) - f.Close() - return path -} From 663aa9ab8cf0f4182bde669cc50620724a8829df Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 19 Jun 2024 10:03:06 +0200 Subject: [PATCH 231/286] Override variables with lookup value even if values has default value set (#1504) ## Changes This PR fixes the behaviour when variables were not overridden with lookup value from targets if these variables had any default value set in the default target. 
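
For example (an illustrative configuration with made-up names, not the regression test itself), a variable that declares a default at the top level but is overridden with a lookup in a target should now resolve through the lookup rather than the default:

```yaml
variables:
  pool:
    description: instance pool to run on
    default: ""            # previously this default took precedence over the target's lookup

targets:
  prod:
    variables:
      pool:
        lookup:
          instance_pool: prod-instance-pool   # with this fix, the lookup is resolved
```
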
Fixes #1449 ## Tests Added regression test --- .../resolve_resource_references_test.go | 35 +++++++++++++++++++ bundle/config/mutator/set_variables.go | 12 +++---- .../variables/env_overrides/databricks.yml | 13 ++++--- bundle/tests/variables_test.go | 28 +++++++++++++-- 4 files changed, 76 insertions(+), 12 deletions(-) diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index 60636bcc6..214b712e3 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/env" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -194,3 +195,37 @@ func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) { diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences())) require.ErrorContains(t, diags.Error(), "lookup variables cannot contain references to another lookup variables") } + +func TestNoResolveLookupIfVariableSetWithEnvVariable(t *testing.T) { + s := func(s string) *string { + return &s + } + + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Target: "dev", + }, + Variables: map[string]*variable.Variable{ + "foo": { + Value: s("bar"), + }, + "lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster-${var.foo}-${bundle.target}", + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + ctx := context.Background() + ctx = env.Set(ctx, "BUNDLE_VAR_lookup", "1234-5678-abcd") + + diags := bundle.Apply(ctx, b, bundle.Seq(SetVariables(), ResolveVariableReferencesInLookup(), ResolveResourceReferences())) + require.NoError(t, diags.Error()) + require.Equal(t, "1234-5678-abcd", *b.Config.Variables["lookup"].Value) +} diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index eae1fe2ab..0cee24ab6 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -37,6 +37,12 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Di return nil } + // case: Defined a variable for named lookup for a resource + // It will be resolved later in ResolveResourceReferences mutator + if v.Lookup != nil { + return nil + } + // case: Set the variable to its default value if v.HasDefault() { err := v.Set(*v.Default) @@ -46,12 +52,6 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Di return nil } - // case: Defined a variable for named lookup for a resource - // It will be resolved later in ResolveResourceReferences mutator - if v.Lookup != nil { - return nil - } - // We should have had a value to set for the variable at this point. return diag.Errorf(`no value assigned to required variable %s. 
Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) } diff --git a/bundle/tests/variables/env_overrides/databricks.yml b/bundle/tests/variables/env_overrides/databricks.yml index e8adb9566..560513bc3 100644 --- a/bundle/tests/variables/env_overrides/databricks.yml +++ b/bundle/tests/variables/env_overrides/databricks.yml @@ -8,14 +8,16 @@ variables: d: description: variable with lookup - lookup: - cluster: some-cluster + default: "" e: description: variable with lookup - lookup: - instance_pool: some-pool + default: "some-value" + f: + description: variable with lookup + lookup: + cluster_policy: wrong-cluster-policy bundle: name: test bundle @@ -49,4 +51,7 @@ targets: e: lookup: instance_pool: some-test-instance-pool + f: + lookup: + cluster_policy: some-test-cluster-policy b: prod-b diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index f51802684..09441483b 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -6,7 +6,10 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -112,13 +115,34 @@ func TestVariablesWithoutDefinition(t *testing.T) { func TestVariablesWithTargetLookupOverrides(t *testing.T) { b := load(t, "./variables/env_overrides") + + mockWorkspaceClient := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(mockWorkspaceClient.WorkspaceClient) + instancePoolApi := mockWorkspaceClient.GetMockInstancePoolsAPI() + instancePoolApi.EXPECT().GetByInstancePoolName(mock.Anything, "some-test-instance-pool").Return(&compute.InstancePoolAndStats{ + InstancePoolId: "1234", + }, nil) + + clustersApi := mockWorkspaceClient.GetMockClustersAPI() + clustersApi.EXPECT().GetByClusterName(mock.Anything, "some-test-cluster").Return(&compute.ClusterDetails{ + ClusterId: "4321", + }, nil) + + clusterPoliciesApi := mockWorkspaceClient.GetMockClusterPoliciesAPI() + clusterPoliciesApi.EXPECT().GetByName(mock.Anything, "some-test-cluster-policy").Return(&compute.Policy{ + PolicyId: "9876", + }, nil) + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-overrides-lookup"), mutator.SetVariables(), + mutator.ResolveResourceReferences(), )) + require.NoError(t, diags.Error()) - assert.Equal(t, "cluster: some-test-cluster", b.Config.Variables["d"].Lookup.String()) - assert.Equal(t, "instance-pool: some-test-instance-pool", b.Config.Variables["e"].Lookup.String()) + assert.Equal(t, "4321", *b.Config.Variables["d"].Value) + assert.Equal(t, "1234", *b.Config.Variables["e"].Value) + assert.Equal(t, "9876", *b.Config.Variables["f"].Value) } func TestVariableTargetOverrides(t *testing.T) { From cb4ab5007df3fedade35dba10ec509d068e416ac Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 19 Jun 2024 16:31:04 +0530 Subject: [PATCH 232/286] Add link to documentation for Homebrew installation to README (#1505) Co-authored-by: Julia Crawford (Databricks) --- README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5f3b78b79..51780d0f9 100644 --- a/README.md +++ b/README.md @@ -4,18 +4,21 @@ This project is in Public Preview. 
-Documentation about the full REST API coverage is available in the [docs folder](docs/commands.md). - Documentation is available at https://docs.databricks.com/dev-tools/cli/databricks-cli.html. ## Installation This CLI is packaged as a dependency-free binary executable and may be located in any directory. See https://github.com/databricks/cli/releases for releases and -[the docs pages](https://docs.databricks.com/dev-tools/cli/databricks-cli.html) for -installation instructions. +the [Databricks documentation](https://docs.databricks.com/en/dev-tools/cli/install.html) for detailed information about installing the CLI. ------ +### Homebrew + +We maintain a [Homebrew tap](https://github.com/databricks/homebrew-tap) for installing the Databricks CLI. You can find instructions for how to install, upgrade and downgrade the CLI using Homebrew [here](https://github.com/databricks/homebrew-tap/blob/main/README.md). + +------ +### Docker You can use the CLI via a Docker image by pulling the image from `ghcr.io`. You can find all available versions at: https://github.com/databricks/cli/pkgs/container/cli. ``` From deb3e365cdc4b3345542f8a9c4030eb0163c6bff Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 19 Jun 2024 15:54:35 +0200 Subject: [PATCH 233/286] Pause quality monitors when "mode: development" is used (#1481) ## Changes Similar to scheduled jobs, quality monitors should be paused when in development mode (in line with the [behavior for scheduled jobs](https://docs.databricks.com/en/dev-tools/bundles/deployment-modes.html)). @aravind-segu @arpitjasa-db please take a look and verify this behavior. - [x] Followup: documentation changes. If we make this change we should update https://docs.databricks.com/dev-tools/bundles/deployment-modes.html. ## Tests Unit tests --- bundle/config/mutator/process_target_mode.go | 10 ++++++++++ .../config/mutator/process_target_mode_test.go | 16 ++++++++++++++++ bundle/tests/quality_monitor/databricks.yml | 15 +++++++++++---- bundle/tests/quality_monitor_test.go | 6 +++--- 4 files changed, 40 insertions(+), 7 deletions(-) diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 8e70fab73..53d97a5b4 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -105,6 +106,15 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagno // (registered models in Unity Catalog don't yet support tags) } + for i := range r.QualityMonitors { + // Remove all schedules from monitors, since they don't support pausing/unpausing. + // Quality monitors might support the "pause" property in the future, so at the + // CLI level we do respect that property if it is set to "unpaused". 
+ if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused { + r.QualityMonitors[i].Schedule = nil + } + } + return nil } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index cf8229bfe..6a38f274f 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -99,6 +99,20 @@ func mockBundle(mode config.Mode) *bundle.Bundle { }, QualityMonitors: map[string]*resources.QualityMonitor{ "qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}}, + "qualityMonitor2": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "qualityMonitor2", + Schedule: &catalog.MonitorCronSchedule{}, + }, + }, + "qualityMonitor3": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "qualityMonitor3", + Schedule: &catalog.MonitorCronSchedule{ + PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused, + }, + }, + }, }, }, }, @@ -151,6 +165,8 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Quality Monitor 1 assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) + assert.Nil(t, b.Config.Resources.QualityMonitors["qualityMonitor2"].Schedule) + assert.Equal(t, catalog.MonitorCronSchedulePauseStatusUnpaused, b.Config.Resources.QualityMonitors["qualityMonitor3"].Schedule.PauseStatus) } func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { diff --git a/bundle/tests/quality_monitor/databricks.yml b/bundle/tests/quality_monitor/databricks.yml index 3abcdfdda..6138b9357 100644 --- a/bundle/tests/quality_monitor/databricks.yml +++ b/bundle/tests/quality_monitor/databricks.yml @@ -1,19 +1,26 @@ +bundle: + name: quality_monitors + resources: quality_monitors: my_monitor: - table_name: "main.test.thing1" + table_name: "main.test.dev" assets_dir: "/Shared/provider-test/databricks_monitoring/main.test.thing1" - output_schema_name: "test" + output_schema_name: "main.dev" inference_log: granularities: ["1 day"] timestamp_col: "timestamp" prediction_col: "prediction" model_id_col: "model_id" problem_type: "PROBLEM_TYPE_REGRESSION" + schedule: + quartz_cron_expression: "0 0 12 * * ?" 
# every day at noon + timezone_id: UTC targets: development: mode: development + default: true resources: quality_monitors: my_monitor: @@ -24,14 +31,14 @@ targets: quality_monitors: my_monitor: table_name: "main.test.staging" - output_schema_name: "staging" + output_schema_name: "main.staging" production: resources: quality_monitors: my_monitor: table_name: "main.test.prod" - output_schema_name: "prod" + output_schema_name: "main.prod" inference_log: granularities: ["1 hour"] timestamp_col: "timestamp_prod" diff --git a/bundle/tests/quality_monitor_test.go b/bundle/tests/quality_monitor_test.go index d5db05196..9b91052f5 100644 --- a/bundle/tests/quality_monitor_test.go +++ b/bundle/tests/quality_monitor_test.go @@ -24,7 +24,7 @@ func TestMonitorTableNames(t *testing.T) { p := b.Config.Resources.QualityMonitors["my_monitor"] assert.Equal(t, "main.test.dev", p.TableName) assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) - assert.Equal(t, "test", p.OutputSchemaName) + assert.Equal(t, "main.dev", p.OutputSchemaName) assertExpectedMonitor(t, p) } @@ -36,7 +36,7 @@ func TestMonitorStaging(t *testing.T) { p := b.Config.Resources.QualityMonitors["my_monitor"] assert.Equal(t, "main.test.staging", p.TableName) assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) - assert.Equal(t, "staging", p.OutputSchemaName) + assert.Equal(t, "main.staging", p.OutputSchemaName) assertExpectedMonitor(t, p) } @@ -48,7 +48,7 @@ func TestMonitorProduction(t *testing.T) { p := b.Config.Resources.QualityMonitors["my_monitor"] assert.Equal(t, "main.test.prod", p.TableName) assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) - assert.Equal(t, "prod", p.OutputSchemaName) + assert.Equal(t, "main.prod", p.OutputSchemaName) inferenceLog := p.InferenceLog assert.Equal(t, []string{"1 day", "1 hour"}, inferenceLog.Granularities) From b2c03ea54cef1504c9af1b7b0d9dfabba7bff68e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 19 Jun 2024 17:24:57 +0200 Subject: [PATCH 234/286] Use `dyn.InvalidValue` to indicate absence (#1507) ## Changes Previously, the functions `Get` and `Index` returned `dyn.NilValue` to indicate that a map key or sequence index wasn't found. This is a valid value, so we need to differentiate between actual absence and a real `dyn.NilValue`. We do this with the zero value of a `dyn.Value` (also captured in the constant `dyn.InvalidValue`). ## Tests * Unit tests. * Renamed `Get` and `Index` to find and update all call sites. --- bundle/config/mutator/environments_compat.go | 8 ++-- bundle/config/mutator/merge_job_clusters.go | 2 +- bundle/config/mutator/merge_job_tasks.go | 2 +- .../config/mutator/merge_pipeline_clusters.go | 2 +- bundle/config/root.go | 30 +++++++------ libs/dyn/convert/from_typed.go | 8 +++- libs/dyn/value.go | 8 ++-- libs/dyn/value_underlying_test.go | 42 +++++++++---------- libs/dyn/walk.go | 6 +-- libs/dyn/walk_test.go | 6 +-- 10 files changed, 62 insertions(+), 52 deletions(-) diff --git a/bundle/config/mutator/environments_compat.go b/bundle/config/mutator/environments_compat.go index cbedcaefd..053fd2e36 100644 --- a/bundle/config/mutator/environments_compat.go +++ b/bundle/config/mutator/environments_compat.go @@ -32,18 +32,18 @@ func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) dia targets := v.Get("targets") // Return an error if both "environments" and "targets" are set. 
- if environments != dyn.NilValue && targets != dyn.NilValue { - return dyn.NilValue, fmt.Errorf( + if environments != dyn.InvalidValue && targets != dyn.InvalidValue { + return dyn.InvalidValue, fmt.Errorf( "both 'environments' and 'targets' are specified; only 'targets' should be used: %s", environments.Location().String(), ) } // Rewrite "environments" to "targets". - if environments != dyn.NilValue && targets == dyn.NilValue { + if environments != dyn.InvalidValue && targets == dyn.InvalidValue { nv, err := dyn.Set(v, "targets", environments) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } // Drop the "environments" key. return dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { diff --git a/bundle/config/mutator/merge_job_clusters.go b/bundle/config/mutator/merge_job_clusters.go index 20f4efe85..ec6154608 100644 --- a/bundle/config/mutator/merge_job_clusters.go +++ b/bundle/config/mutator/merge_job_clusters.go @@ -21,7 +21,7 @@ func (m *mergeJobClusters) Name() string { func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string { switch v.Kind() { - case dyn.KindNil: + case dyn.KindInvalid, dyn.KindNil: return "" case dyn.KindString: return v.MustString() diff --git a/bundle/config/mutator/merge_job_tasks.go b/bundle/config/mutator/merge_job_tasks.go index 68c05383c..f9a9bf718 100644 --- a/bundle/config/mutator/merge_job_tasks.go +++ b/bundle/config/mutator/merge_job_tasks.go @@ -21,7 +21,7 @@ func (m *mergeJobTasks) Name() string { func (m *mergeJobTasks) taskKeyString(v dyn.Value) string { switch v.Kind() { - case dyn.KindNil: + case dyn.KindInvalid, dyn.KindNil: return "" case dyn.KindString: return v.MustString() diff --git a/bundle/config/mutator/merge_pipeline_clusters.go b/bundle/config/mutator/merge_pipeline_clusters.go index 0b1cf8983..c75f65326 100644 --- a/bundle/config/mutator/merge_pipeline_clusters.go +++ b/bundle/config/mutator/merge_pipeline_clusters.go @@ -22,7 +22,7 @@ func (m *mergePipelineClusters) Name() string { func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string { switch v.Kind() { - case dyn.KindNil: + case dyn.KindInvalid, dyn.KindNil: // Note: the cluster label is optional and defaults to 'default'. // We therefore ALSO merge all clusters without a label. return "default" diff --git a/bundle/config/root.go b/bundle/config/root.go index 2bc905bd6..1d56ba80d 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -337,7 +337,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `run_as`. This field must be overwritten if set, not merged. - if v := target.Get("run_as"); v != dyn.NilValue { + if v := target.Get("run_as"); v != dyn.InvalidValue { root, err = dyn.Set(root, "run_as", v) if err != nil { return err @@ -345,7 +345,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Below, we're setting fields on the bundle key, so make sure it exists. - if root.Get("bundle") == dyn.NilValue { + if root.Get("bundle") == dyn.InvalidValue { root, err = dyn.Set(root, "bundle", dyn.NewValue(map[string]dyn.Value{}, dyn.Location{})) if err != nil { return err @@ -353,7 +353,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `mode`. This field must be overwritten if set, not merged. 
- if v := target.Get("mode"); v != dyn.NilValue { + if v := target.Get("mode"); v != dyn.InvalidValue { root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("mode")), v) if err != nil { return err @@ -361,7 +361,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `compute_id`. This field must be overwritten if set, not merged. - if v := target.Get("compute_id"); v != dyn.NilValue { + if v := target.Get("compute_id"); v != dyn.InvalidValue { root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v) if err != nil { return err @@ -369,7 +369,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `git`. - if v := target.Get("git"); v != dyn.NilValue { + if v := target.Get("git"); v != dyn.InvalidValue { ref, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git"))) if err != nil { ref = dyn.NewValue(map[string]dyn.Value{}, dyn.Location{}) @@ -382,7 +382,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // If the branch was overridden, we need to clear the inferred flag. - if branch := v.Get("branch"); branch != dyn.NilValue { + if branch := v.Get("branch"); branch != dyn.InvalidValue { out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.NewValue(false, dyn.Location{})) if err != nil { return err @@ -410,7 +410,7 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { // For each target, rewrite the variables block. return dyn.Map(v, "targets", dyn.Foreach(func(_ dyn.Path, target dyn.Value) (dyn.Value, error) { // Confirm it has a variables block. - if target.Get("variables") == dyn.NilValue { + if target.Get("variables") == dyn.InvalidValue { return target, nil } @@ -440,15 +440,19 @@ func validateVariableOverrides(root, target dyn.Value) (err error) { var tv map[string]variable.Variable // Collect variables from the root. - err = convert.ToTyped(&rv, root.Get("variables")) - if err != nil { - return fmt.Errorf("unable to collect variables from root: %w", err) + if v := root.Get("variables"); v != dyn.InvalidValue { + err = convert.ToTyped(&rv, v) + if err != nil { + return fmt.Errorf("unable to collect variables from root: %w", err) + } } // Collect variables from the target. - err = convert.ToTyped(&tv, target.Get("variables")) - if err != nil { - return fmt.Errorf("unable to collect variables from target: %w", err) + if v := target.Get("variables"); v != dyn.InvalidValue { + err = convert.ToTyped(&tv, v) + if err != nil { + return fmt.Errorf("unable to collect variables from target: %w", err) + } } // Check that all variables in the target exist in the root. diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index ae491d8ab..b57d52be8 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -172,9 +172,15 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out := make([]dyn.Value, src.Len()) for i := 0; i < src.Len(); i++ { v := src.Index(i) + refv := ref.Index(i) + + // Use nil reference if there is no reference for this index. + if refv == dyn.InvalidValue { + refv = dyn.NilValue + } // Convert entry taking into account the reference value (may be equal to dyn.NilValue). 
- nv, err := fromTyped(v.Interface(), ref.Index(i), includeZeroValuedScalars) + nv, err := fromTyped(v.Interface(), refv, includeZeroValuedScalars) if err != nil { return dyn.InvalidValue, err } diff --git a/libs/dyn/value.go b/libs/dyn/value.go index 2e8f1b9af..3d62ea1f5 100644 --- a/libs/dyn/value.go +++ b/libs/dyn/value.go @@ -110,12 +110,12 @@ func (v Value) AsAny() any { func (v Value) Get(key string) Value { m, ok := v.AsMap() if !ok { - return NilValue + return InvalidValue } vv, ok := m.GetByString(key) if !ok { - return NilValue + return InvalidValue } return vv @@ -124,11 +124,11 @@ func (v Value) Get(key string) Value { func (v Value) Index(i int) Value { s, ok := v.v.([]Value) if !ok { - return NilValue + return InvalidValue } if i < 0 || i >= len(s) { - return NilValue + return InvalidValue } return s[i] diff --git a/libs/dyn/value_underlying_test.go b/libs/dyn/value_underlying_test.go index 9878cfaf9..83cffb772 100644 --- a/libs/dyn/value_underlying_test.go +++ b/libs/dyn/value_underlying_test.go @@ -18,15 +18,15 @@ func TestValueUnderlyingMap(t *testing.T) { vv1, ok := v.AsMap() assert.True(t, ok) - _, ok = dyn.NilValue.AsMap() + _, ok = dyn.InvalidValue.AsMap() assert.False(t, ok) vv2 := v.MustMap() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind map, got nil", func() { - dyn.NilValue.MustMap() + assert.PanicsWithValue(t, "expected kind map, got invalid", func() { + dyn.InvalidValue.MustMap() }) } @@ -40,15 +40,15 @@ func TestValueUnderlyingSequence(t *testing.T) { vv1, ok := v.AsSequence() assert.True(t, ok) - _, ok = dyn.NilValue.AsSequence() + _, ok = dyn.InvalidValue.AsSequence() assert.False(t, ok) vv2 := v.MustSequence() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind sequence, got nil", func() { - dyn.NilValue.MustSequence() + assert.PanicsWithValue(t, "expected kind sequence, got invalid", func() { + dyn.InvalidValue.MustSequence() }) } @@ -58,15 +58,15 @@ func TestValueUnderlyingString(t *testing.T) { vv1, ok := v.AsString() assert.True(t, ok) - _, ok = dyn.NilValue.AsString() + _, ok = dyn.InvalidValue.AsString() assert.False(t, ok) vv2 := v.MustString() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind string, got nil", func() { - dyn.NilValue.MustString() + assert.PanicsWithValue(t, "expected kind string, got invalid", func() { + dyn.InvalidValue.MustString() }) } @@ -76,15 +76,15 @@ func TestValueUnderlyingBool(t *testing.T) { vv1, ok := v.AsBool() assert.True(t, ok) - _, ok = dyn.NilValue.AsBool() + _, ok = dyn.InvalidValue.AsBool() assert.False(t, ok) vv2 := v.MustBool() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind bool, got nil", func() { - dyn.NilValue.MustBool() + assert.PanicsWithValue(t, "expected kind bool, got invalid", func() { + dyn.InvalidValue.MustBool() }) } @@ -94,15 +94,15 @@ func TestValueUnderlyingInt(t *testing.T) { vv1, ok := v.AsInt() assert.True(t, ok) - _, ok = dyn.NilValue.AsInt() + _, ok = dyn.InvalidValue.AsInt() assert.False(t, ok) vv2 := v.MustInt() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind int, got nil", func() { - dyn.NilValue.MustInt() + assert.PanicsWithValue(t, "expected kind int, got invalid", func() { + dyn.InvalidValue.MustInt() }) // Test int32 type specifically. 
@@ -124,15 +124,15 @@ func TestValueUnderlyingFloat(t *testing.T) {
 	vv1, ok := v.AsFloat()
 	assert.True(t, ok)

-	_, ok = dyn.NilValue.AsFloat()
+	_, ok = dyn.InvalidValue.AsFloat()
 	assert.False(t, ok)

 	vv2 := v.MustFloat()
 	assert.Equal(t, vv1, vv2)

 	// Test panic.
-	assert.PanicsWithValue(t, "expected kind float, got nil", func() {
-		dyn.NilValue.MustFloat()
+	assert.PanicsWithValue(t, "expected kind float, got invalid", func() {
+		dyn.InvalidValue.MustFloat()
 	})

 	// Test float64 type specifically.
@@ -148,14 +148,14 @@ func TestValueUnderlyingTime(t *testing.T) {
 	vv1, ok := v.AsTime()
 	assert.True(t, ok)

-	_, ok = dyn.NilValue.AsTime()
+	_, ok = dyn.InvalidValue.AsTime()
 	assert.False(t, ok)

 	vv2 := v.MustTime()
 	assert.Equal(t, vv1, vv2)

 	// Test panic.
-	assert.PanicsWithValue(t, "expected kind time, got nil", func() {
-		dyn.NilValue.MustTime()
+	assert.PanicsWithValue(t, "expected kind time, got invalid", func() {
+		dyn.InvalidValue.MustTime()
 	})
 }
diff --git a/libs/dyn/walk.go b/libs/dyn/walk.go
index 97b99b061..c51a11e22 100644
--- a/libs/dyn/walk.go
+++ b/libs/dyn/walk.go
@@ -28,7 +28,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro
 		if err == ErrSkip {
 			return v, nil
 		}
-		return NilValue, err
+		return InvalidValue, err
 	}

 	switch v.Kind() {
@@ -43,7 +43,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro
 				continue
 			}
 			if err != nil {
-				return NilValue, err
+				return InvalidValue, err
 			}
 			out.Set(pk, nv)
 		}
@@ -57,7 +57,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro
 				continue
 			}
 			if err != nil {
-				return NilValue, err
+				return InvalidValue, err
 			}
 			out = append(out, nv)
 		}
diff --git a/libs/dyn/walk_test.go b/libs/dyn/walk_test.go
index d62b9a4db..f7222b0a5 100644
--- a/libs/dyn/walk_test.go
+++ b/libs/dyn/walk_test.go
@@ -39,7 +39,7 @@ func (w *walkCallTracker) returnSkip(path string) {
 }

 func (w *walkCallTracker) returnDrop(path string) {
-	w.on(path, func(v Value) Value { return NilValue }, ErrDrop)
+	w.on(path, func(v Value) Value { return InvalidValue }, ErrDrop)
 }

 func (w *walkCallTracker) track(p Path, v Value) (Value, error) {
@@ -148,7 +148,7 @@ func TestWalkMapError(t *testing.T) {
 	})
 	out, err := Walk(value, tracker.track)
 	assert.Equal(t, cerr, err)
-	assert.Equal(t, NilValue, out)
+	assert.Equal(t, InvalidValue, out)

 	// The callback should have been called twice.
 	assert.Len(t, tracker.calls, 2)
@@ -239,7 +239,7 @@ func TestWalkSequenceError(t *testing.T) {
 	})
 	out, err := Walk(value, tracker.track)
 	assert.Equal(t, cerr, err)
-	assert.Equal(t, NilValue, out)
+	assert.Equal(t, InvalidValue, out)

 	// The callback should have been called three times.
 	assert.Len(t, tracker.calls, 3)

From 57a5a65f8711c481ba1598921adb645eb7195a97 Mon Sep 17 00:00:00 2001
From: Gleb Kanterov
Date: Thu, 20 Jun 2024 10:43:08 +0200
Subject: [PATCH 235/286] Add ApplyPythonMutator (#1430)

## Changes
Add ApplyPythonMutator, which forks a Python subprocess and pipes the bundle configuration through it.

It's enabled through the `experimental` section, for example:

```yaml
experimental:
  pydabs:
    enabled: true
    venv_path: .venv
```

For now, it's limited to two phases in the mutator pipeline:
- `load`: adds new jobs
- `init`: adds new jobs, or modifies existing ones

It's enforced that no jobs are modified in `load` and no jobs are deleted in `load`/`init`, because otherwise it would break existing assumptions.
## Tests Unit tests --- bundle/config/experimental.go | 16 + bundle/config/mutator/mutator.go | 2 + .../mutator/python/apply_python_mutator.go | 268 +++++++++++ .../python/apply_python_mutator_test.go | 450 ++++++++++++++++++ bundle/config/mutator/python/log_writer.go | 42 ++ bundle/config/root.go | 4 + bundle/phases/initialize.go | 4 + libs/process/opts.go | 21 + 8 files changed, 807 insertions(+) create mode 100644 bundle/config/mutator/python/apply_python_mutator.go create mode 100644 bundle/config/mutator/python/apply_python_mutator_test.go create mode 100644 bundle/config/mutator/python/log_writer.go diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 008d7b909..12048a322 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -23,6 +23,22 @@ type Experimental struct { // be removed in the future once we have a proper workaround like allowing IS_OWNER // as a top-level permission in the DAB. UseLegacyRunAs bool `json:"use_legacy_run_as,omitempty"` + + // PyDABs determines whether to load the 'databricks-pydabs' package. + // + // PyDABs allows to define bundle configuration using Python. + PyDABs PyDABs `json:"pydabs,omitempty"` +} + +type PyDABs struct { + // Enabled is a flag to enable the feature. + Enabled bool `json:"enabled,omitempty"` + + // VEnvPath is path to the virtual environment. + // + // Required if PyDABs is enabled. PyDABs will load the code in the specified + // environment. + VEnvPath string `json:"venv_path,omitempty"` } type Command string diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index 7d7711118..d6bfcb775 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -4,6 +4,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/loader" + pythonmutator "github.com/databricks/cli/bundle/config/mutator/python" "github.com/databricks/cli/bundle/scripts" ) @@ -24,5 +25,6 @@ func DefaultMutators() []bundle.Mutator { InitializeVariables(), DefineDefaultTarget(), LoadGitDetails(), + pythonmutator.ApplyPythonMutator(pythonmutator.ApplyPythonMutatorPhaseLoad), } } diff --git a/bundle/config/mutator/python/apply_python_mutator.go b/bundle/config/mutator/python/apply_python_mutator.go new file mode 100644 index 000000000..298ffb576 --- /dev/null +++ b/bundle/config/mutator/python/apply_python_mutator.go @@ -0,0 +1,268 @@ +package python + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/process" +) + +type phase string + +const ( + // ApplyPythonMutatorPhaseLoad is the phase in which bundle configuration is loaded. + // + // At this stage, PyDABs adds statically defined resources to the bundle configuration. + // Which resources are added should be deterministic and not depend on the bundle configuration. + // + // We also open for possibility of appending other sections of bundle configuration, + // for example, adding new variables. However, this is not supported yet, and CLI rejects + // such changes. 
+ ApplyPythonMutatorPhaseLoad phase = "load" + + // ApplyPythonMutatorPhaseInit is the phase after bundle configuration was loaded, and + // the list of statically declared resources is known. + // + // At this stage, PyDABs adds resources defined using generators, or mutates existing resources, + // including the ones defined using YAML. + // + // During this process, within generator and mutators, PyDABs can access: + // - selected deployment target + // - bundle variables values + // - variables provided through CLI arguments or environment variables + // + // The following is not available: + // - variables referencing other variables are in unresolved format + // + // PyDABs can output YAML containing references to variables, and CLI should resolve them. + // + // Existing resources can't be removed, and CLI rejects such changes. + ApplyPythonMutatorPhaseInit phase = "init" +) + +type applyPythonMutator struct { + phase phase +} + +func ApplyPythonMutator(phase phase) bundle.Mutator { + return &applyPythonMutator{ + phase: phase, + } +} + +func (m *applyPythonMutator) Name() string { + return fmt.Sprintf("ApplyPythonMutator(%s)", m.phase) +} + +func getExperimental(b *bundle.Bundle) config.Experimental { + if b.Config.Experimental == nil { + return config.Experimental{} + } + + return *b.Config.Experimental +} + +func (m *applyPythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + experimental := getExperimental(b) + + if !experimental.PyDABs.Enabled { + return nil + } + + if experimental.PyDABs.VEnvPath == "" { + return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set") + } + + err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { + pythonPath := interpreterPath(experimental.PyDABs.VEnvPath) + + if _, err := os.Stat(pythonPath); err != nil { + if os.IsNotExist(err) { + return dyn.InvalidValue, fmt.Errorf("can't find %q, check if venv is created", pythonPath) + } else { + return dyn.InvalidValue, fmt.Errorf("can't find %q: %w", pythonPath, err) + } + } + + rightRoot, err := m.runPythonMutator(ctx, b.RootPath, pythonPath, leftRoot) + if err != nil { + return dyn.InvalidValue, err + } + + visitor, err := createOverrideVisitor(ctx, m.phase) + if err != nil { + return dyn.InvalidValue, err + } + + return merge.Override(leftRoot, rightRoot, visitor) + }) + + return diag.FromErr(err) +} + +func (m *applyPythonMutator) runPythonMutator(ctx context.Context, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, error) { + args := []string{ + pythonPath, + "-m", + "databricks.bundles.build", + "--phase", + string(m.phase), + } + + // we need to marshal dyn.Value instead of bundle.Config to JSON to support + // non-string fields assigned with bundle variables + rootConfigJson, err := json.Marshal(root.AsAny()) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to marshal root config: %w", err) + } + + logWriter := newLogWriter(ctx, "stderr: ") + + stdout, err := process.Background( + ctx, + args, + process.WithDir(rootPath), + process.WithStderrWriter(logWriter), + process.WithStdinReader(bytes.NewBuffer(rootConfigJson)), + ) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("python mutator process failed: %w", err) + } + + // we need absolute path, or because later parts of pipeline assume all paths are absolute + // and this file will be used as location + virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml")) + if err != nil { + 
return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err) + } + + generated, err := yamlloader.LoadYAML(virtualPath, bytes.NewReader([]byte(stdout))) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to parse Python mutator output: %w", err) + } + + normalized, diagnostic := convert.Normalize(config.Root{}, generated) + if diagnostic.Error() != nil { + return dyn.InvalidValue, fmt.Errorf("failed to normalize Python mutator output: %w", diagnostic.Error()) + } + + // warnings shouldn't happen because output should be already normalized + // when it happens, it's a bug in the mutator, and should be treated as an error + + for _, d := range diagnostic.Filter(diag.Warning) { + return dyn.InvalidValue, fmt.Errorf("failed to normalize Python mutator output: %s", d.Summary) + } + + return normalized, nil +} + +func createOverrideVisitor(ctx context.Context, phase phase) (merge.OverrideVisitor, error) { + switch phase { + case ApplyPythonMutatorPhaseLoad: + return createLoadOverrideVisitor(ctx), nil + case ApplyPythonMutatorPhaseInit: + return createInitOverrideVisitor(ctx), nil + default: + return merge.OverrideVisitor{}, fmt.Errorf("unknown phase: %s", phase) + } +} + +// createLoadOverrideVisitor creates an override visitor for the load phase. +// +// During load, it's only possible to create new resources, and not modify or +// delete existing ones. +func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { + jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) + + return merge.OverrideVisitor{ + VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + }, + VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + insertResource := len(valuePath) == len(jobsPath)+1 + + // adding a property into an existing resource is not allowed, because it changes it + if !insertResource { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + log.Debugf(ctx, "Insert value at %q", valuePath.String()) + + return right, nil + }, + VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) + }, + } +} + +// createInitOverrideVisitor creates an override visitor for the init phase. +// +// During the init phase it's possible to create new resources, modify existing +// resources, but not delete existing resources. 
+func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { + jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) + + return merge.OverrideVisitor{ + VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + if !valuePath.HasPrefix(jobsPath) { + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + } + + deleteResource := len(valuePath) == len(jobsPath)+1 + + if deleteResource { + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + } + + // deleting properties is allowed because it only changes an existing resource + log.Debugf(ctx, "Delete value at %q", valuePath.String()) + + return nil + }, + VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + log.Debugf(ctx, "Insert value at %q", valuePath.String()) + + return right, nil + }, + VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) + } + + log.Debugf(ctx, "Update value at %q", valuePath.String()) + + return right, nil + }, + } +} + +// interpreterPath returns platform-specific path to Python interpreter in the virtual environment. +func interpreterPath(venvPath string) string { + if runtime.GOOS == "windows" { + return filepath.Join(venvPath, "Scripts", "python3.exe") + } else { + return filepath.Join(venvPath, "bin", "python3") + } +} diff --git a/bundle/config/mutator/python/apply_python_mutator_test.go b/bundle/config/mutator/python/apply_python_mutator_test.go new file mode 100644 index 000000000..8759ab801 --- /dev/null +++ b/bundle/config/mutator/python/apply_python_mutator_test.go @@ -0,0 +1,450 @@ +package python + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "testing" + + "golang.org/x/exp/maps" + + "github.com/databricks/cli/libs/dyn" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/cli/libs/process" +) + +func TestApplyPythonMutator_Name_load(t *testing.T) { + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + + assert.Equal(t, "ApplyPythonMutator(load)", mutator.Name()) +} + +func TestApplyPythonMutator_Name_init(t *testing.T) { + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseInit) + + assert.Equal(t, "ApplyPythonMutator(init)", mutator.Name()) +} + +func TestApplyPythonMutator_load(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0" + }, + "job1": { + name: "job_1" + }, + } + } + }`) + + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diag.Error()) + + assert.ElementsMatch(t, []string{"job0", "job1"}, maps.Keys(b.Config.Resources.Jobs)) + + if job0, ok := b.Config.Resources.Jobs["job0"]; ok { + assert.Equal(t, "job_0", job0.Name) + } + + if job1, ok := 
b.Config.Resources.Jobs["job1"]; ok { + assert.Equal(t, "job_1", job1.Name) + } +} + +func TestApplyPythonMutator_load_disallowed(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0", + description: "job description" + } + } + } + }`) + + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.EqualError(t, diag.Error(), "unexpected change at \"resources.jobs.job0.description\" (insert)") +} + +func TestApplyPythonMutator_init(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "init", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0", + description: "my job" + } + } + } + }`) + + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseInit) + diag := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diag.Error()) + + assert.ElementsMatch(t, []string{"job0"}, maps.Keys(b.Config.Resources.Jobs)) + assert.Equal(t, "job_0", b.Config.Resources.Jobs["job0"].Name) + assert.Equal(t, "my job", b.Config.Resources.Jobs["job0"].Description) +} + +func TestApplyPythonMutator_badOutput(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "resources": { + "jobs": { + "job0": { + unknown_property: "my job" + } + } + } + }`) + + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.EqualError(t, diag.Error(), "failed to normalize Python mutator output: unknown field: unknown_property") +} + +func TestApplyPythonMutator_disabled(t *testing.T) { + b := loadYaml("databricks.yml", ``) + + ctx := context.Background() + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diag.Error()) +} + +func TestApplyPythonMutator_venvRequired(t *testing.T) { + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true`) + + ctx := context.Background() + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.Error(t, diag.Error(), "\"experimental.enable_pydabs\" is enabled, but \"experimental.venv.path\" is not set") +} + +func TestApplyPythonMutator_venvNotFound(t *testing.T) { + expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path")) + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: bad_path`) + + mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseInit) + diag := bundle.Apply(context.Background(), b, mutator) + + assert.EqualError(t, diag.Error(), 
expectedError) +} + +type createOverrideVisitorTestCase struct { + name string + updatePath dyn.Path + deletePath dyn.Path + insertPath dyn.Path + phase phase + updateError error + deleteError error + insertError error +} + +func TestCreateOverrideVisitor(t *testing.T) { + left := dyn.NewValue(42, dyn.Location{}) + right := dyn.NewValue(1337, dyn.Location{}) + + testCases := []createOverrideVisitorTestCase{ + { + name: "load: can't change an existing job", + phase: ApplyPythonMutatorPhaseLoad, + updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), + deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), + insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (delete)"), + insertError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (update)"), + }, + { + name: "load: can't delete an existing job", + phase: ApplyPythonMutatorPhaseLoad, + deletePath: dyn.MustPathFromString("resources.jobs.job0"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + }, + { + name: "load: can insert a job", + phase: ApplyPythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources.jobs.job0"), + insertError: nil, + }, + { + name: "load: can't change include", + phase: ApplyPythonMutatorPhaseLoad, + deletePath: dyn.MustPathFromString("include[0]"), + insertPath: dyn.MustPathFromString("include[0]"), + updatePath: dyn.MustPathFromString("include[0]"), + deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), + insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + }, + { + name: "init: can change an existing job", + phase: ApplyPythonMutatorPhaseInit, + updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), + deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), + insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), + deleteError: nil, + insertError: nil, + updateError: nil, + }, + { + name: "init: can't delete an existing job", + phase: ApplyPythonMutatorPhaseInit, + deletePath: dyn.MustPathFromString("resources.jobs.job0"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + }, + { + name: "init: can insert a job", + phase: ApplyPythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources.jobs.job0"), + insertError: nil, + }, + { + name: "init: can't change include", + phase: ApplyPythonMutatorPhaseInit, + deletePath: dyn.MustPathFromString("include[0]"), + insertPath: dyn.MustPathFromString("include[0]"), + updatePath: dyn.MustPathFromString("include[0]"), + deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), + insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + }, + } + + for _, tc := range testCases { + visitor, err := createOverrideVisitor(context.Background(), tc.phase) + if err != nil { + t.Fatalf("create visitor failed: %v", err) + } + + if tc.updatePath != nil { + t.Run(tc.name+"-update", func(t *testing.T) { + out, err := visitor.VisitUpdate(tc.updatePath, left, right) + + if tc.updateError != nil { + assert.Equal(t, tc.updateError, err) + } else { + assert.NoError(t, err) + assert.Equal(t, right, out) + } + }) + } + + if tc.deletePath != nil { + 
t.Run(tc.name+"-delete", func(t *testing.T) { + err := visitor.VisitDelete(tc.deletePath, left) + + if tc.deleteError != nil { + assert.Equal(t, tc.deleteError, err) + } else { + assert.NoError(t, err) + } + }) + } + + if tc.insertPath != nil { + t.Run(tc.name+"-insert", func(t *testing.T) { + out, err := visitor.VisitInsert(tc.insertPath, right) + + if tc.insertError != nil { + assert.Equal(t, tc.insertError, err) + } else { + assert.NoError(t, err) + assert.Equal(t, right, out) + } + }) + } + } +} + +func TestInterpreterPath(t *testing.T) { + if runtime.GOOS == "windows" { + assert.Equal(t, "venv\\Scripts\\python3.exe", interpreterPath("venv")) + } else { + assert.Equal(t, "venv/bin/python3", interpreterPath("venv")) + } +} + +func withProcessStub(args []string, stdout string) context.Context { + ctx := context.Background() + ctx, stub := process.WithStub(ctx) + + stub.WithCallback(func(actual *exec.Cmd) error { + if reflect.DeepEqual(actual.Args, args) { + _, err := actual.Stdout.Write([]byte(stdout)) + + return err + } else { + return fmt.Errorf("unexpected command: %v", actual.Args) + } + }) + + return ctx +} + +func loadYaml(name string, content string) *bundle.Bundle { + v, diag := config.LoadFromBytes(name, []byte(content)) + + if diag.Error() != nil { + panic(diag.Error()) + } + + return &bundle.Bundle{ + Config: *v, + } +} + +func withFakeVEnv(t *testing.T, path string) { + interpreterPath := interpreterPath(path) + + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + + if err := os.Chdir(t.TempDir()); err != nil { + panic(err) + } + + err = os.MkdirAll(filepath.Dir(interpreterPath), 0755) + if err != nil { + panic(err) + } + + err = os.WriteFile(interpreterPath, []byte(""), 0755) + if err != nil { + panic(err) + } + + t.Cleanup(func() { + if err := os.Chdir(cwd); err != nil { + panic(err) + } + }) +} diff --git a/bundle/config/mutator/python/log_writer.go b/bundle/config/mutator/python/log_writer.go new file mode 100644 index 000000000..aa3db0571 --- /dev/null +++ b/bundle/config/mutator/python/log_writer.go @@ -0,0 +1,42 @@ +package python + +import ( + "bufio" + "bytes" + "context" + "io" + + "github.com/databricks/cli/libs/log" +) + +type logWriter struct { + ctx context.Context + prefix string + buf bytes.Buffer +} + +// newLogWriter creates a new io.Writer that writes to log with specified prefix. +func newLogWriter(ctx context.Context, prefix string) io.Writer { + return &logWriter{ + ctx: ctx, + prefix: prefix, + } +} + +func (p *logWriter) Write(bytes []byte) (n int, err error) { + p.buf.Write(bytes) + + scanner := bufio.NewScanner(&p.buf) + + for scanner.Scan() { + line := scanner.Text() + + log.Debugf(p.ctx, "%s%s", p.prefix, line) + } + + remaining := p.buf.Bytes() + p.buf.Reset() + p.buf.Write(remaining) + + return len(bytes), nil +} diff --git a/bundle/config/root.go b/bundle/config/root.go index 1d56ba80d..2ce3a1389 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -74,6 +74,10 @@ func Load(path string) (*Root, diag.Diagnostics) { return nil, diag.FromErr(err) } + return LoadFromBytes(path, raw) +} + +func LoadFromBytes(path string, raw []byte) (*Root, diag.Diagnostics) { r := Root{} // Load configuration tree from YAML. 
diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index ded2e1980..d96c8d3b3 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -4,6 +4,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" + pythonmutator "github.com/databricks/cli/bundle/config/mutator/python" "github.com/databricks/cli/bundle/deploy/metadata" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/permissions" @@ -28,6 +29,9 @@ func Initialize() bundle.Mutator { mutator.ExpandWorkspaceRoot(), mutator.DefineDefaultWorkspacePaths(), mutator.SetVariables(), + // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences + // and ResolveVariableReferences. See what is expected in ApplyPythonMutatorPhaseInit doc + pythonmutator.ApplyPythonMutator(pythonmutator.ApplyPythonMutatorPhaseInit), mutator.ResolveVariableReferencesInLookup(), mutator.ResolveResourceReferences(), mutator.ResolveVariableReferences( diff --git a/libs/process/opts.go b/libs/process/opts.go index e201c6668..9516e49ba 100644 --- a/libs/process/opts.go +++ b/libs/process/opts.go @@ -48,6 +48,27 @@ func WithStdoutPipe(dst *io.ReadCloser) execOption { } } +func WithStdinReader(src io.Reader) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stdin = src + return nil + } +} + +func WithStderrWriter(dst io.Writer) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stderr = dst + return nil + } +} + +func WithStdoutWriter(dst io.Writer) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stdout = dst + return nil + } +} + func WithCombinedOutput(buf *bytes.Buffer) execOption { return func(_ context.Context, c *exec.Cmd) error { c.Stdout = io.MultiWriter(buf, c.Stdout) From 01adef666ab6db585dbd8fbb623bb04dec3ac4b4 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 21 Jun 2024 13:14:33 +0200 Subject: [PATCH 236/286] Set bool pointer to disable lock (#1516) ## Changes This cherry-picks from #1490 to address an issue that came up in #1511. The function `dyn.SetByPath` requires intermediate values to be present. If they are not, it returns an error that it cannot index a map. This is not an issue on main, where the intermediate maps are always created, even if they are not present in the dynamic configuration tree. As of #1511, we'll no longer populate empty maps for empty structs if they are not explicitly set (i.e., a non-nil pointer). This change writes a bool pointer to avoid this issue altogether. ## Tests Unit tests pass. 
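For illustration only (not part of this patch): a minimal sketch of the failure mode described above, using the `dyn` helpers that appear elsewhere in this series. The `root` value is a hypothetical dynamic tree in which `bundle.deployment.lock` was never populated.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// Hypothetical dynamic configuration tree without the intermediate
	// "bundle" -> "deployment" -> "lock" maps.
	root := dyn.V(map[string]dyn.Value{})

	// dyn.SetByPath needs the intermediate maps to exist, so this call is
	// expected to fail on the tree above with the "cannot index a map"
	// class of error described in this commit message.
	_, err := dyn.SetByPath(
		root,
		dyn.NewPath(dyn.Key("bundle"), dyn.Key("deployment"), dyn.Key("lock"), dyn.Key("enabled")),
		dyn.V(false),
	)
	fmt.Println(err)
}
```

Setting the typed field directly (`b.Config.Bundle.Deployment.Lock.Enabled = &disabled`) sidesteps the dynamic tree entirely, which is why the bool pointer approach works even when the intermediate maps are absent.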
--- bundle/config/mutator/process_target_mode.go | 15 ++------------- bundle/config/mutator/process_target_mode_test.go | 4 ++-- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 53d97a5b4..b50716fd6 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -34,10 +33,8 @@ func (m *processTargetMode) Name() string { func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() { log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true") - err := disableDeploymentLock(b) - if err != nil { - return diag.FromErr(err) - } + disabled := false + b.Config.Bundle.Deployment.Lock.Enabled = &disabled } r := b.Config.Resources @@ -118,14 +115,6 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagno return nil } -func disableDeploymentLock(b *bundle.Bundle) error { - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - return dyn.Map(v, "bundle.deployment.lock", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { - return dyn.Set(v, "enabled", dyn.V(false)) - }) - }) -} - func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics { if path := findNonUserPath(b); path != "" { return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path) diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 6a38f274f..03da64e77 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -330,7 +330,7 @@ func TestDisableLocking(t *testing.T) { ctx := context.Background() b := mockBundle(config.Development) - err := transformDevelopmentMode(ctx, b) + err := bundle.Apply(ctx, b, ProcessTargetMode()) require.Nil(t, err) assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled()) } @@ -341,7 +341,7 @@ func TestDisableLockingDisabled(t *testing.T) { explicitlyEnabled := true b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled - err := transformDevelopmentMode(ctx, b) + err := bundle.Apply(ctx, b, ProcessTargetMode()) require.Nil(t, err) assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled") } From 87bc58381917df8eae431e4f4a12912891ff67c6 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 21 Jun 2024 13:19:48 +0200 Subject: [PATCH 237/286] Allow the any type to be set to nil in `convert.FromTyped` (#1518) ## Changes This came up in integration testing for #1511. One of the tests converted a `map[string]any` to a dynamic value and encountered a `nil` and errored out. We can safely return a nil in this case. ## Tests Unit test passes. 
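To illustrate the case described above (a sketch, not taken from the patch; the map key is hypothetical), converting an untyped value that holds a `nil` entry now produces a nil dynamic value instead of failing:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
)

func main() {
	// Hypothetical untyped input with an entry that is explicitly nil.
	src := map[string]any{"tags": nil}

	// With this change the nil entry converts to dyn.NilValue;
	// previously the reflect.Invalid kind fell through to the
	// "unsupported type" error.
	v, err := convert.FromTyped(src, dyn.NilValue)
	fmt.Println(v, err)
}
```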
--- libs/dyn/convert/from_typed.go | 3 +++ libs/dyn/convert/from_typed_test.go | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index b57d52be8..e5fb0de6f 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -72,6 +72,9 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, return fromTypedInt(srcv, ref, options...) case reflect.Float32, reflect.Float64: return fromTypedFloat(srcv, ref, options...) + case reflect.Invalid: + // If the value is untyped and not set (e.g. any type with nil value), we return nil. + return dyn.NilValue, nil } return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index f75470f42..7a0dad84b 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -619,3 +619,11 @@ func TestFromTypedFloatTypeError(t *testing.T) { _, err := FromTyped(src, ref) require.Error(t, err) } + +func TestFromTypedAnyNil(t *testing.T) { + var src any = nil + var ref = dyn.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) +} From 446a9d0c52d09b80b647aecbd8286929f7647cb7 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 21 Jun 2024 15:43:21 +0200 Subject: [PATCH 238/286] Properly deal with nil values in `convert.FromTyped` (#1511) ## Changes When a configuration defines: ```yaml run_as: ``` It first showed up as `run_as -> nil` in the dynamic configuration only to later be converted to `run_as -> {}` while going through typed conversion. We were using the presence of a key to initialize an empty value. This is incorrect and it should have remained a nil value. This conversion was happening in `convert.FromTyped` where any struct always returned a map value. Instead, it should only return a map value in any one of these cases: 1) the struct has elements, 2) the struct was originally a map in the dynamic configuration, or 3) the struct was initialized to a non-empty pointer value. Stacked on top of #1516 and #1518. ## Tests * Unit tests pass. * Integration tests pass. * Manually ran through bundle CRUD with a bundle without resources. --- bundle/config/mutator/run_as_test.go | 7 ++- bundle/deploy/terraform/convert_test.go | 18 +++++++ bundle/permissions/filter.go | 5 ++ libs/dyn/convert/from_typed.go | 64 ++++++++++++++----------- libs/dyn/convert/from_typed_test.go | 58 ++++++++++++++++++---- 5 files changed, 115 insertions(+), 37 deletions(-) diff --git a/bundle/config/mutator/run_as_test.go b/bundle/config/mutator/run_as_test.go index c57de847b..67bf7bcc2 100644 --- a/bundle/config/mutator/run_as_test.go +++ b/bundle/config/mutator/run_as_test.go @@ -18,7 +18,7 @@ import ( func allResourceTypes(t *testing.T) []string { // Compute supported resource types based on the `Resources{}` struct. - r := config.Resources{} + r := &config.Resources{} rv, err := convert.FromTyped(r, dyn.NilValue) require.NoError(t, err) normalized, _ := convert.Normalize(r, rv, convert.IncludeMissingFields) @@ -154,6 +154,11 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) { v, err := convert.FromTyped(base, dyn.NilValue) require.NoError(t, err) + // Define top level resources key in the bundle configuration. + // This is not part of the typed configuration, so we need to add it manually. 
+ v, err = dyn.Set(v, "resources", dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + for _, rt := range allResourceTypes(t) { // Skip allowed resources if slices.Contains(allowList, rt) { diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index e1f73be28..7ea448538 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -455,6 +455,24 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { var src = resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", + + // Need to specify this to satisfy the equivalence test: + // The previous method of generation includes the "create" field + // because it is required (not marked as `omitempty`). + // The previous method used [json.Marshal] from the standard library + // and as such observed the `omitempty` tag. + // The new method leverages [dyn.Value] where any field that is not + // explicitly set is not part of the value. + Config: serving.EndpointCoreConfigInput{ + ServedModels: []serving.ServedModelInput{ + { + ModelName: "model_name", + ModelVersion: "1", + ScaleToZeroEnabled: true, + WorkloadSize: "Small", + }, + }, + }, }, Permissions: []resources.Permission{ { diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go index 6d39630c8..60264f6ea 100644 --- a/bundle/permissions/filter.go +++ b/bundle/permissions/filter.go @@ -66,6 +66,11 @@ func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Di err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { rv, err := dyn.Get(v, "resources") if err != nil { + // If the resources key is not found, we can skip this mutator. + if dyn.IsNoSuchKeyError(err) { + return v, nil + } + return dyn.InvalidValue, err } diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index e5fb0de6f..af49a07ab 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -12,26 +12,24 @@ import ( type fromTypedOptions int const ( - // If this flag is set, zero values for scalars (strings, bools, ints, floats) - // would resolve to corresponding zero values in the dynamic representation. - // Otherwise, zero values for scalars resolve to dyn.NilValue. + // If this flag is set, zero values in the typed representation are resolved to + // the equivalent zero value in the dynamic representation. + // If it is not set, zero values resolve to [dyn.NilValue]. // - // This flag exists to reconcile the default values for scalars in a Go struct - // being zero values with zero values in a dynamic representation. In a Go struct, - // zero values are the same as the values not being set at all. This is not the case - // in the dynamic representation. - // - // If a scalar value in a typed Go struct is zero, in the dynamic representation - // we would set it to dyn.NilValue, i.e. equivalent to the value not being set at all. - // - // If a scalar value in a Go map, slice or pointer is set to zero, we will set it - // to the zero value in the dynamic representation, and not dyn.NilValue. This is - // equivalent to the value being intentionally set to zero. - includeZeroValuedScalars fromTypedOptions = 1 << iota + // This flag exists to reconcile default values in Go being zero values with values + // being intentionally set to their zero value. 
We capture zero values in the dynamic + // configuration if they are 1) behind a pointer, 2) a map value, 3) a slice element, + // in the typed configuration. + includeZeroValues fromTypedOptions = 1 << iota ) // FromTyped converts changes made in the typed structure w.r.t. the configuration value // back to the configuration value, retaining existing location information where possible. +// +// It uses the reference value both for location information and to determine if the typed +// value was changed or not. For example, if a struct-by-value field is nil in the reference +// it will be zero-valued in the typed configuration. If it remains zero-valued, this +// this function will still emit a nil value in the dynamic representation. func FromTyped(src any, ref dyn.Value) (dyn.Value, error) { return fromTyped(src, ref) } @@ -48,18 +46,18 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, } srcv = srcv.Elem() - // If a pointer to a scalar type points to a zero value, we should include + // If a pointer to a type points to a zero value, we should include // that zero value in the dynamic representation. // This is because by default a pointer is nil in Go, and it not being nil // indicates its value was intentionally set to zero. - if !slices.Contains(options, includeZeroValuedScalars) { - options = append(options, includeZeroValuedScalars) + if !slices.Contains(options, includeZeroValues) { + options = append(options, includeZeroValues) } } switch srcv.Kind() { case reflect.Struct: - return fromTypedStruct(srcv, ref) + return fromTypedStruct(srcv, ref, options...) case reflect.Map: return fromTypedMap(srcv, ref) case reflect.Slice: @@ -80,7 +78,7 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) } -func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { +func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { case dyn.KindMap, dyn.KindNil: @@ -108,12 +106,22 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, err } - if nv != dyn.NilValue { + // Either if the key was set in the reference or the field is not zero-valued, we include it. + if ok || nv != dyn.NilValue { out.Set(refk, nv) } } - return dyn.NewValue(out, ref.Location()), nil + // Return the new mapping if: + // 1. The mapping has entries (i.e. the struct was not empty). + // 2. The reference is a map (i.e. the struct was and still is empty). + // 3. The "includeZeroValues" option is set (i.e. the struct is a non-nil pointer). + if out.Len() > 0 || ref.Kind() == dyn.KindMap || slices.Contains(options, includeZeroValues) { + return dyn.NewValue(out, ref.Location()), nil + } + + // Otherwise, return nil. + return dyn.NilValue, nil } func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { @@ -146,7 +154,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { } // Convert entry taking into account the reference value (may be equal to dyn.NilValue). 
- nv, err := fromTyped(v.Interface(), refv, includeZeroValuedScalars) + nv, err := fromTyped(v.Interface(), refv, includeZeroValues) if err != nil { return dyn.InvalidValue, err } @@ -183,7 +191,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { } // Convert entry taking into account the reference value (may be equal to dyn.NilValue). - nv, err := fromTyped(v.Interface(), refv, includeZeroValuedScalars) + nv, err := fromTyped(v.Interface(), refv, includeZeroValues) if err != nil { return dyn.InvalidValue, err } @@ -206,7 +214,7 @@ func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptio case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { + if src.IsZero() && !slices.Contains(options, includeZeroValues) { return dyn.NilValue, nil } return dyn.V(src.String()), nil @@ -226,7 +234,7 @@ func fromTypedBool(src reflect.Value, ref dyn.Value, options ...fromTypedOptions case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { + if src.IsZero() && !slices.Contains(options, includeZeroValues) { return dyn.NilValue, nil } return dyn.V(src.Bool()), nil @@ -251,7 +259,7 @@ func fromTypedInt(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { + if src.IsZero() && !slices.Contains(options, includeZeroValues) { return dyn.NilValue, nil } return dyn.V(src.Int()), nil @@ -276,7 +284,7 @@ func fromTypedFloat(src reflect.Value, ref dyn.Value, options ...fromTypedOption case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { + if src.IsZero() && !slices.Contains(options, includeZeroValues) { return dyn.NilValue, nil } return dyn.V(src.Float()), nil diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index 7a0dad84b..e5447fe80 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -15,9 +15,14 @@ func TestFromTypedStructZeroFields(t *testing.T) { } src := Tmp{} - ref := dyn.NilValue - nv, err := FromTyped(src, ref) + // For an empty struct with a nil reference we expect a nil. + nv, err := FromTyped(src, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + + // For an empty struct with a non-nil reference we expect an empty map. + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) require.NoError(t, err) assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) } @@ -28,17 +33,54 @@ func TestFromTypedStructPointerZeroFields(t *testing.T) { Bar string `json:"bar"` } - // For an initialized pointer we expect an empty map. 
- src := &Tmp{} - nv, err := FromTyped(src, dyn.NilValue) - require.NoError(t, err) - assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) + var src *Tmp + var nv dyn.Value + var err error - // For a nil pointer we expect nil. + // For a nil pointer with a nil reference we expect a nil. src = nil nv, err = FromTyped(src, dyn.NilValue) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) + + // For a nil pointer with a non-nil reference we expect a nil. + src = nil + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + + // For an initialized pointer with a nil reference we expect a nil. + src = &Tmp{} + nv, err = FromTyped(src, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) + + // For an initialized pointer with a non-nil reference we expect an empty map. + src = &Tmp{} + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) +} + +func TestFromTypedStructNilFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + // For a zero value struct with a reference containing nil fields we expect the nils to be retained. + src := Tmp{} + ref := dyn.V(map[string]dyn.Value{ + "foo": dyn.NilValue, + "bar": dyn.NilValue, + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.NilValue, + "bar": dyn.NilValue, + }), nv) } func TestFromTypedStructSetFields(t *testing.T) { From 068c7cfc2d319db256c8316d3dfa95636b972c3c Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 21 Jun 2024 19:52:42 +0530 Subject: [PATCH 239/286] Return `dyn.InvalidValue` instead of `dyn.NilValue` when errors happen (#1514) ## Changes With https://github.com/databricks/cli/pull/1507 and https://github.com/databricks/cli/pull/1511 we are clarifying the semantics associated with `dyn.InvalidValue` and `dyn.NilValue`. An invalid value is the default zero value and is used to signals the complete absence of the value. A nil value, on the other hand, is a valid value for a piece of configuration and signals explicitly setting a key to nil in the configuration tree. In keeping with that theme, this PR returns `dyn.InvalidValue` instead of `dyn.NilValue` at error sites. This change is not expected to have a material change in behaviour and is being done to set the right convention since we have well-defined semantics associated with both `NilValue` and `InvalidValue`. ## Tests Unit tests and integration tests pass. Also manually scanned the changes and the associated call sites to verify the `NilValue` value itself was not being relied upon. 
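To make the convention concrete, here is a minimal sketch (not part of this change; the `lookup` helper and the keys are hypothetical, only the `dyn` API calls are real): error paths return `dyn.InvalidValue` together with the error, while a key that is explicitly set to null comes back as `dyn.NilValue`, which is a perfectly valid result.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

// lookup follows the convention described above: on failure it returns
// dyn.InvalidValue alongside the error; a key that is explicitly set to null
// in the configuration comes back as dyn.NilValue, which is a valid result.
func lookup(v dyn.Value, key string) (dyn.Value, error) {
	out, err := dyn.Get(v, key)
	if err != nil {
		// Absence of the value is signaled with InvalidValue, not NilValue.
		return dyn.InvalidValue, err
	}
	// out may legitimately be dyn.NilValue if the key was set to null.
	return out, nil
}

func main() {
	v := dyn.V(map[string]dyn.Value{
		"foo": dyn.V("bar"),
		"baz": dyn.NilValue, // explicitly null in the configuration
	})

	for _, key := range []string{"foo", "baz", "missing"} {
		out, err := lookup(v, key)
		fmt.Printf("%s: value=%v err=%v\n", key, out, err)
	}
}
```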
--- bundle/config/generate/job.go | 2 +- bundle/config/mutator/rewrite_sync_paths.go | 6 ++--- bundle/deploy/terraform/tfdyn/rename_keys.go | 2 +- libs/dyn/merge/merge.go | 8 +++---- libs/dyn/merge/merge_test.go | 6 ++--- libs/dyn/yamlloader/loader.go | 24 ++++++++++---------- libs/dyn/yamlloader/yaml.go | 2 +- libs/dyn/yamlsaver/utils.go | 2 +- 8 files changed, 26 insertions(+), 26 deletions(-) diff --git a/bundle/config/generate/job.go b/bundle/config/generate/job.go index 469f84228..3ab5e0122 100644 --- a/bundle/config/generate/job.go +++ b/bundle/config/generate/job.go @@ -17,7 +17,7 @@ func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) { for _, task := range job.Settings.Tasks { v, err := convertTaskToValue(task, taskOrder) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } tasks = append(tasks, v) } diff --git a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go index 710190230..85db79797 100644 --- a/bundle/config/mutator/rewrite_sync_paths.go +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -35,7 +35,7 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { dir := filepath.Dir(v.Location().File) rel, err := filepath.Rel(root, dir) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Location()), nil @@ -47,11 +47,11 @@ func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Dia return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) { v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath))) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath))) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } return v, nil }) diff --git a/bundle/deploy/terraform/tfdyn/rename_keys.go b/bundle/deploy/terraform/tfdyn/rename_keys.go index a65c9f257..650ffb890 100644 --- a/bundle/deploy/terraform/tfdyn/rename_keys.go +++ b/bundle/deploy/terraform/tfdyn/rename_keys.go @@ -28,7 +28,7 @@ func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { p[0] = dyn.Key(newKey) acc, err = dyn.SetByPath(acc, p, v) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } return dyn.InvalidValue, dyn.ErrDrop } diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index 69ccf516a..ffe000da3 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -34,17 +34,17 @@ func merge(a, b dyn.Value) (dyn.Value, error) { switch ak { case dyn.KindMap: if bk != dyn.KindMap { - return dyn.NilValue, fmt.Errorf("cannot merge map with %s", bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge map with %s", bk) } return mergeMap(a, b) case dyn.KindSequence: if bk != dyn.KindSequence { - return dyn.NilValue, fmt.Errorf("cannot merge sequence with %s", bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge sequence with %s", bk) } return mergeSequence(a, b) default: if ak != bk { - return dyn.NilValue, fmt.Errorf("cannot merge %s with %s", ak, bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge %s with %s", ak, bk) } return mergePrimitive(a, b) } @@ -66,7 +66,7 @@ func mergeMap(a, b dyn.Value) (dyn.Value, error) { // If the key already exists, merge the values. 
merged, err := merge(ov, pv) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } out.Set(pk, merged) } else { diff --git a/libs/dyn/merge/merge_test.go b/libs/dyn/merge/merge_test.go index eaaaab16f..3706dbd77 100644 --- a/libs/dyn/merge/merge_test.go +++ b/libs/dyn/merge/merge_test.go @@ -76,7 +76,7 @@ func TestMergeMapsError(t *testing.T) { { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge map with string") - assert.Equal(t, dyn.NilValue, out) + assert.Equal(t, dyn.InvalidValue, out) } } @@ -151,7 +151,7 @@ func TestMergeSequencesError(t *testing.T) { { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge sequence with string") - assert.Equal(t, dyn.NilValue, out) + assert.Equal(t, dyn.InvalidValue, out) } } @@ -202,6 +202,6 @@ func TestMergePrimitivesError(t *testing.T) { { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge string with map") - assert.Equal(t, dyn.NilValue, out) + assert.Equal(t, dyn.InvalidValue, out) } } diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index 908793d58..e6a16f79e 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -55,7 +55,7 @@ func (d *loader) load(node *yaml.Node) (dyn.Value, error) { case yaml.AliasNode: value, err = d.loadAlias(node, loc) default: - return dyn.NilValue, errorf(loc, "unknown node kind: %v", node.Kind) + return dyn.InvalidValue, errorf(loc, "unknown node kind: %v", node.Kind) } if err != nil { @@ -80,7 +80,7 @@ func (d *loader) loadSequence(node *yaml.Node, loc dyn.Location) (dyn.Value, err for i, n := range node.Content { v, err := d.load(n) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } acc[i] = v @@ -99,7 +99,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro // Assert that keys are strings if key.Kind != yaml.ScalarNode { - return dyn.NilValue, errorf(loc, "key is not a scalar") + return dyn.InvalidValue, errorf(loc, "key is not a scalar") } st := key.ShortTag() @@ -113,17 +113,17 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro merge = val continue default: - return dyn.NilValue, errorf(loc, "invalid key tag: %v", st) + return dyn.InvalidValue, errorf(loc, "invalid key tag: %v", st) } k, err := d.load(key) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } v, err := d.load(val) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } acc.Set(k, v) @@ -155,7 +155,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro for _, n := range mnodes { v, err := d.load(n) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } m, ok := v.AsMap() if !ok { @@ -186,12 +186,12 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error case "false": return dyn.NewValue(false, loc), nil default: - return dyn.NilValue, errorf(loc, "invalid bool value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid bool value: %v", node.Value) } case "!!int": i64, err := strconv.ParseInt(node.Value, 10, 64) if err != nil { - return dyn.NilValue, errorf(loc, "invalid int value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid int value: %v", node.Value) } // Use regular int type instead of int64 if possible. 
if i64 >= math.MinInt32 && i64 <= math.MaxInt32 { @@ -201,7 +201,7 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error case "!!float": f64, err := strconv.ParseFloat(node.Value, 64) if err != nil { - return dyn.NilValue, errorf(loc, "invalid float value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid float value: %v", node.Value) } return dyn.NewValue(f64, loc), nil case "!!null": @@ -219,9 +219,9 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error return dyn.NewValue(t, loc), nil } } - return dyn.NilValue, errorf(loc, "invalid timestamp value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid timestamp value: %v", node.Value) default: - return dyn.NilValue, errorf(loc, "unknown tag: %v", st) + return dyn.InvalidValue, errorf(loc, "unknown tag: %v", st) } } diff --git a/libs/dyn/yamlloader/yaml.go b/libs/dyn/yamlloader/yaml.go index a18324ffa..b79b41e1e 100644 --- a/libs/dyn/yamlloader/yaml.go +++ b/libs/dyn/yamlloader/yaml.go @@ -15,7 +15,7 @@ func LoadYAML(path string, r io.Reader) (dyn.Value, error) { if err == io.EOF { return dyn.NilValue, nil } - return dyn.NilValue, err + return dyn.InvalidValue, err } return newLoader(path).load(&node) diff --git a/libs/dyn/yamlsaver/utils.go b/libs/dyn/yamlsaver/utils.go index 6149491d6..fa5ab08fb 100644 --- a/libs/dyn/yamlsaver/utils.go +++ b/libs/dyn/yamlsaver/utils.go @@ -15,7 +15,7 @@ func ConvertToMapValue(strct any, order *Order, skipFields []string, dst map[str ref := dyn.NilValue mv, err := convert.FromTyped(strct, ref) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } if mv.Kind() != dyn.KindMap { From 5ff06578ac9cec91fc3a0c4e34c566c9481296dc Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Mon, 24 Jun 2024 09:47:41 +0200 Subject: [PATCH 240/286] PythonMutator: replace stdin/stdout with files (#1512) ## Changes Replace stdin/stdout with files in `PythonMutator`. Files are created in a temporary directory. Rename `ApplyPythonMutator` to `PythonMutator`. Add test for `dyn.Location` behavior during the "load" stage. 
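For illustration, a simplified sketch of the new exchange, using the standard library's `os/exec` in place of the CLI's internal `process` helper; the paths, venv location, and error handling are placeholder assumptions. The serialized configuration is written to `input.json`, the interpreter is pointed at both files via flags, and the mutated configuration is read back from `output.json`.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// runPyDABs sketches the file-based protocol: no configuration flows over
// stdin/stdout anymore; those streams are only forwarded for logging.
func runPyDABs(pythonPath, rootPath, cacheDir, phase string, rootConfigJSON []byte) ([]byte, error) {
	inputPath := filepath.Join(cacheDir, "input.json")
	outputPath := filepath.Join(cacheDir, "output.json")

	// Write the current bundle configuration for the Python process to read.
	if err := os.WriteFile(inputPath, rootConfigJSON, 0600); err != nil {
		return nil, err
	}

	cmd := exec.Command(
		pythonPath, "-m", "databricks.bundles.build",
		"--phase", phase,
		"--input", inputPath,
		"--output", outputPath,
	)
	cmd.Dir = rootPath
	cmd.Stdout = os.Stdout // only logs; the generated config lives in output.json
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("python mutator process failed: %w", err)
	}

	// Read back the (possibly mutated) configuration produced by PyDABs.
	return os.ReadFile(outputPath)
}

func main() {
	out, err := runPyDABs(".venv/bin/python3", ".", os.TempDir(), "load", []byte(`{}`))
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("generated configuration:\n%s\n", out)
}
```

Because everything flows through `input.json` and `output.json`, anything PyDABs prints to stdout or stderr can now be logged without corrupting the configuration exchange.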
## Tests Unit tests --- bundle/config/mutator/mutator.go | 2 +- ...ly_python_mutator.go => python_mutator.go} | 97 ++++++++++++---- ...mutator_test.go => python_mutator_test.go} | 104 ++++++++++++------ bundle/phases/initialize.go | 4 +- 4 files changed, 150 insertions(+), 57 deletions(-) rename bundle/config/mutator/python/{apply_python_mutator.go => python_mutator.go} (73%) rename bundle/config/mutator/python/{apply_python_mutator_test.go => python_mutator_test.go} (77%) diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index d6bfcb775..52f85eeb8 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -25,6 +25,6 @@ func DefaultMutators() []bundle.Mutator { InitializeVariables(), DefineDefaultTarget(), LoadGitDetails(), - pythonmutator.ApplyPythonMutator(pythonmutator.ApplyPythonMutatorPhaseLoad), + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoad), } } diff --git a/bundle/config/mutator/python/apply_python_mutator.go b/bundle/config/mutator/python/python_mutator.go similarity index 73% rename from bundle/config/mutator/python/apply_python_mutator.go rename to bundle/config/mutator/python/python_mutator.go index 298ffb576..73ddf9529 100644 --- a/bundle/config/mutator/python/apply_python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -1,7 +1,6 @@ package python import ( - "bytes" "context" "encoding/json" "fmt" @@ -9,6 +8,8 @@ import ( "path/filepath" "runtime" + "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/diag" @@ -23,7 +24,7 @@ import ( type phase string const ( - // ApplyPythonMutatorPhaseLoad is the phase in which bundle configuration is loaded. + // PythonMutatorPhaseLoad is the phase in which bundle configuration is loaded. // // At this stage, PyDABs adds statically defined resources to the bundle configuration. // Which resources are added should be deterministic and not depend on the bundle configuration. @@ -31,9 +32,9 @@ const ( // We also open for possibility of appending other sections of bundle configuration, // for example, adding new variables. However, this is not supported yet, and CLI rejects // such changes. - ApplyPythonMutatorPhaseLoad phase = "load" + PythonMutatorPhaseLoad phase = "load" - // ApplyPythonMutatorPhaseInit is the phase after bundle configuration was loaded, and + // PythonMutatorPhaseInit is the phase after bundle configuration was loaded, and // the list of statically declared resources is known. // // At this stage, PyDABs adds resources defined using generators, or mutates existing resources, @@ -50,21 +51,21 @@ const ( // PyDABs can output YAML containing references to variables, and CLI should resolve them. // // Existing resources can't be removed, and CLI rejects such changes. 
- ApplyPythonMutatorPhaseInit phase = "init" + PythonMutatorPhaseInit phase = "init" ) -type applyPythonMutator struct { +type pythonMutator struct { phase phase } -func ApplyPythonMutator(phase phase) bundle.Mutator { - return &applyPythonMutator{ +func PythonMutator(phase phase) bundle.Mutator { + return &pythonMutator{ phase: phase, } } -func (m *applyPythonMutator) Name() string { - return fmt.Sprintf("ApplyPythonMutator(%s)", m.phase) +func (m *pythonMutator) Name() string { + return fmt.Sprintf("PythonMutator(%s)", m.phase) } func getExperimental(b *bundle.Bundle) config.Experimental { @@ -75,7 +76,7 @@ func getExperimental(b *bundle.Bundle) config.Experimental { return *b.Config.Experimental } -func (m *applyPythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { +func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { experimental := getExperimental(b) if !experimental.PyDABs.Enabled { @@ -97,7 +98,12 @@ func (m *applyPythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D } } - rightRoot, err := m.runPythonMutator(ctx, b.RootPath, pythonPath, leftRoot) + cacheDir, err := createCacheDir(ctx) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err) + } + + rightRoot, err := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot) if err != nil { return dyn.InvalidValue, err } @@ -113,13 +119,39 @@ func (m *applyPythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D return diag.FromErr(err) } -func (m *applyPythonMutator) runPythonMutator(ctx context.Context, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, error) { +func createCacheDir(ctx context.Context) (string, error) { + // b.CacheDir doesn't work because target isn't yet selected + + // support the same env variable as in b.CacheDir + if tempDir, exists := env.TempDir(ctx); exists { + // use 'default' as target name + cacheDir := filepath.Join(tempDir, "default", "pydabs") + + err := os.MkdirAll(cacheDir, 0700) + if err != nil { + return "", err + } + + return cacheDir, nil + } + + return os.MkdirTemp("", "-pydabs") +} + +func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, error) { + inputPath := filepath.Join(cacheDir, "input.json") + outputPath := filepath.Join(cacheDir, "output.json") + args := []string{ pythonPath, "-m", "databricks.bundles.build", "--phase", string(m.phase), + "--input", + inputPath, + "--output", + outputPath, } // we need to marshal dyn.Value instead of bundle.Config to JSON to support @@ -129,27 +161,48 @@ func (m *applyPythonMutator) runPythonMutator(ctx context.Context, rootPath stri return dyn.InvalidValue, fmt.Errorf("failed to marshal root config: %w", err) } - logWriter := newLogWriter(ctx, "stderr: ") + err = os.WriteFile(inputPath, rootConfigJson, 0600) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to write input file: %w", err) + } - stdout, err := process.Background( + stderrWriter := newLogWriter(ctx, "stderr: ") + stdoutWriter := newLogWriter(ctx, "stdout: ") + + _, err = process.Background( ctx, args, process.WithDir(rootPath), - process.WithStderrWriter(logWriter), - process.WithStdinReader(bytes.NewBuffer(rootConfigJson)), + process.WithStderrWriter(stderrWriter), + process.WithStdoutWriter(stdoutWriter), ) if err != nil { return dyn.InvalidValue, fmt.Errorf("python mutator process failed: %w", err) } - // we need absolute path, or because 
later parts of pipeline assume all paths are absolute - // and this file will be used as location + outputFile, err := os.Open(outputPath) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to open Python mutator output: %w", err) + } + + defer func() { + _ = outputFile.Close() + }() + + // we need absolute path because later parts of pipeline assume all paths are absolute + // and this file will be used as location to resolve relative paths. + // + // virtualPath has to stay in rootPath, because locations outside root path are not allowed: + // + // Error: path /var/folders/.../pydabs/dist/*.whl is not contained in bundle root path + // + // for that, we pass virtualPath instead of outputPath as file location virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml")) if err != nil { return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err) } - generated, err := yamlloader.LoadYAML(virtualPath, bytes.NewReader([]byte(stdout))) + generated, err := yamlloader.LoadYAML(virtualPath, outputFile) if err != nil { return dyn.InvalidValue, fmt.Errorf("failed to parse Python mutator output: %w", err) } @@ -171,9 +224,9 @@ func (m *applyPythonMutator) runPythonMutator(ctx context.Context, rootPath stri func createOverrideVisitor(ctx context.Context, phase phase) (merge.OverrideVisitor, error) { switch phase { - case ApplyPythonMutatorPhaseLoad: + case PythonMutatorPhaseLoad: return createLoadOverrideVisitor(ctx), nil - case ApplyPythonMutatorPhaseInit: + case PythonMutatorPhaseInit: return createInitOverrideVisitor(ctx), nil default: return merge.OverrideVisitor{}, fmt.Errorf("unknown phase: %s", phase) diff --git a/bundle/config/mutator/python/apply_python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go similarity index 77% rename from bundle/config/mutator/python/apply_python_mutator_test.go rename to bundle/config/mutator/python/python_mutator_test.go index 8759ab801..e2c20386a 100644 --- a/bundle/config/mutator/python/apply_python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -10,6 +10,9 @@ import ( "runtime" "testing" + "github.com/databricks/cli/bundle/env" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" "github.com/databricks/cli/libs/dyn" @@ -20,19 +23,19 @@ import ( "github.com/databricks/cli/libs/process" ) -func TestApplyPythonMutator_Name_load(t *testing.T) { - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) +func TestPythonMutator_Name_load(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseLoad) - assert.Equal(t, "ApplyPythonMutator(load)", mutator.Name()) + assert.Equal(t, "PythonMutator(load)", mutator.Name()) } -func TestApplyPythonMutator_Name_init(t *testing.T) { - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseInit) +func TestPythonMutator_Name_init(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseInit) - assert.Equal(t, "ApplyPythonMutator(init)", mutator.Name()) + assert.Equal(t, "PythonMutator(init)", mutator.Name()) } -func TestApplyPythonMutator_load(t *testing.T) { +func TestPythonMutator_load(t *testing.T) { withFakeVEnv(t, ".venv") b := loadYaml("databricks.yml", ` @@ -46,6 +49,7 @@ func TestApplyPythonMutator_load(t *testing.T) { name: job_0`) ctx := withProcessStub( + t, []string{ interpreterPath(".venv"), "-m", @@ -72,7 +76,7 @@ func TestApplyPythonMutator_load(t *testing.T) { } }`) - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoad) diag := 
bundle.Apply(ctx, b, mutator) assert.NoError(t, diag.Error()) @@ -88,7 +92,7 @@ func TestApplyPythonMutator_load(t *testing.T) { } } -func TestApplyPythonMutator_load_disallowed(t *testing.T) { +func TestPythonMutator_load_disallowed(t *testing.T) { withFakeVEnv(t, ".venv") b := loadYaml("databricks.yml", ` @@ -102,6 +106,7 @@ func TestApplyPythonMutator_load_disallowed(t *testing.T) { name: job_0`) ctx := withProcessStub( + t, []string{ interpreterPath(".venv"), "-m", @@ -126,13 +131,13 @@ func TestApplyPythonMutator_load_disallowed(t *testing.T) { } }`) - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoad) diag := bundle.Apply(ctx, b, mutator) assert.EqualError(t, diag.Error(), "unexpected change at \"resources.jobs.job0.description\" (insert)") } -func TestApplyPythonMutator_init(t *testing.T) { +func TestPythonMutator_init(t *testing.T) { withFakeVEnv(t, ".venv") b := loadYaml("databricks.yml", ` @@ -146,6 +151,7 @@ func TestApplyPythonMutator_init(t *testing.T) { name: job_0`) ctx := withProcessStub( + t, []string{ interpreterPath(".venv"), "-m", @@ -170,7 +176,7 @@ func TestApplyPythonMutator_init(t *testing.T) { } }`) - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseInit) + mutator := PythonMutator(PythonMutatorPhaseInit) diag := bundle.Apply(ctx, b, mutator) assert.NoError(t, diag.Error()) @@ -178,9 +184,28 @@ func TestApplyPythonMutator_init(t *testing.T) { assert.ElementsMatch(t, []string{"job0"}, maps.Keys(b.Config.Resources.Jobs)) assert.Equal(t, "job_0", b.Config.Resources.Jobs["job0"].Name) assert.Equal(t, "my job", b.Config.Resources.Jobs["job0"].Description) + + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + // 'name' wasn't changed, so it keeps its location + name, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.name")) + require.NoError(t, err) + assert.Equal(t, "databricks.yml", name.Location().File) + + // 'description' was updated by PyDABs and has location of generated file until + // we implement source maps + description, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.description")) + require.NoError(t, err) + + expectedVirtualPath, err := filepath.Abs("__generated_by_pydabs__.yml") + require.NoError(t, err) + assert.Equal(t, expectedVirtualPath, description.Location().File) + + return v, nil + }) + assert.NoError(t, err) } -func TestApplyPythonMutator_badOutput(t *testing.T) { +func TestPythonMutator_badOutput(t *testing.T) { withFakeVEnv(t, ".venv") b := loadYaml("databricks.yml", ` @@ -194,6 +219,7 @@ func TestApplyPythonMutator_badOutput(t *testing.T) { name: job_0`) ctx := withProcessStub( + t, []string{ interpreterPath(".venv"), "-m", @@ -211,36 +237,36 @@ func TestApplyPythonMutator_badOutput(t *testing.T) { } }`) - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoad) diag := bundle.Apply(ctx, b, mutator) assert.EqualError(t, diag.Error(), "failed to normalize Python mutator output: unknown field: unknown_property") } -func TestApplyPythonMutator_disabled(t *testing.T) { +func TestPythonMutator_disabled(t *testing.T) { b := loadYaml("databricks.yml", ``) ctx := context.Background() - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoad) diag := bundle.Apply(ctx, b, mutator) assert.NoError(t, diag.Error()) } -func TestApplyPythonMutator_venvRequired(t *testing.T) { +func TestPythonMutator_venvRequired(t *testing.T) { b := 
loadYaml("databricks.yml", ` experimental: pydabs: enabled: true`) ctx := context.Background() - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseLoad) + mutator := PythonMutator(PythonMutatorPhaseLoad) diag := bundle.Apply(ctx, b, mutator) assert.Error(t, diag.Error(), "\"experimental.enable_pydabs\" is enabled, but \"experimental.venv.path\" is not set") } -func TestApplyPythonMutator_venvNotFound(t *testing.T) { +func TestPythonMutator_venvNotFound(t *testing.T) { expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path")) b := loadYaml("databricks.yml", ` @@ -249,7 +275,7 @@ func TestApplyPythonMutator_venvNotFound(t *testing.T) { enabled: true venv_path: bad_path`) - mutator := ApplyPythonMutator(ApplyPythonMutatorPhaseInit) + mutator := PythonMutator(PythonMutatorPhaseInit) diag := bundle.Apply(context.Background(), b, mutator) assert.EqualError(t, diag.Error(), expectedError) @@ -273,7 +299,7 @@ func TestCreateOverrideVisitor(t *testing.T) { testCases := []createOverrideVisitorTestCase{ { name: "load: can't change an existing job", - phase: ApplyPythonMutatorPhaseLoad, + phase: PythonMutatorPhaseLoad, updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), @@ -283,19 +309,19 @@ func TestCreateOverrideVisitor(t *testing.T) { }, { name: "load: can't delete an existing job", - phase: ApplyPythonMutatorPhaseLoad, + phase: PythonMutatorPhaseLoad, deletePath: dyn.MustPathFromString("resources.jobs.job0"), deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), }, { name: "load: can insert a job", - phase: ApplyPythonMutatorPhaseLoad, + phase: PythonMutatorPhaseLoad, insertPath: dyn.MustPathFromString("resources.jobs.job0"), insertError: nil, }, { name: "load: can't change include", - phase: ApplyPythonMutatorPhaseLoad, + phase: PythonMutatorPhaseLoad, deletePath: dyn.MustPathFromString("include[0]"), insertPath: dyn.MustPathFromString("include[0]"), updatePath: dyn.MustPathFromString("include[0]"), @@ -305,7 +331,7 @@ func TestCreateOverrideVisitor(t *testing.T) { }, { name: "init: can change an existing job", - phase: ApplyPythonMutatorPhaseInit, + phase: PythonMutatorPhaseInit, updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), @@ -315,19 +341,19 @@ func TestCreateOverrideVisitor(t *testing.T) { }, { name: "init: can't delete an existing job", - phase: ApplyPythonMutatorPhaseInit, + phase: PythonMutatorPhaseInit, deletePath: dyn.MustPathFromString("resources.jobs.job0"), deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), }, { name: "init: can insert a job", - phase: ApplyPythonMutatorPhaseInit, + phase: PythonMutatorPhaseInit, insertPath: dyn.MustPathFromString("resources.jobs.job0"), insertError: nil, }, { name: "init: can't change include", - phase: ApplyPythonMutatorPhaseInit, + phase: PythonMutatorPhaseInit, deletePath: dyn.MustPathFromString("include[0]"), insertPath: dyn.MustPathFromString("include[0]"), updatePath: dyn.MustPathFromString("include[0]"), @@ -391,14 +417,28 @@ func TestInterpreterPath(t *testing.T) { } } -func withProcessStub(args []string, stdout string) context.Context { +func withProcessStub(t *testing.T, args []string, stdout string) context.Context { ctx := context.Background() ctx, stub 
:= process.WithStub(ctx) - stub.WithCallback(func(actual *exec.Cmd) error { - if reflect.DeepEqual(actual.Args, args) { - _, err := actual.Stdout.Write([]byte(stdout)) + t.Setenv(env.TempDirVariable, t.TempDir()) + // after we override env variable, we always get the same cache dir as mutator + cacheDir, err := createCacheDir(ctx) + require.NoError(t, err) + + inputPath := filepath.Join(cacheDir, "input.json") + outputPath := filepath.Join(cacheDir, "output.json") + + args = append(args, "--input", inputPath) + args = append(args, "--output", outputPath) + + stub.WithCallback(func(actual *exec.Cmd) error { + _, err := os.Stat(inputPath) + assert.NoError(t, err) + + if reflect.DeepEqual(actual.Args, args) { + err := os.WriteFile(outputPath, []byte(stdout), 0600) return err } else { return fmt.Errorf("unexpected command: %v", actual.Args) diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index d96c8d3b3..d96ee0ebf 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -30,8 +30,8 @@ func Initialize() bundle.Mutator { mutator.DefineDefaultWorkspacePaths(), mutator.SetVariables(), // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences - // and ResolveVariableReferences. See what is expected in ApplyPythonMutatorPhaseInit doc - pythonmutator.ApplyPythonMutator(pythonmutator.ApplyPythonMutatorPhaseInit), + // and ResolveVariableReferences. See what is expected in PythonMutatorPhaseInit doc + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseInit), mutator.ResolveVariableReferencesInLookup(), mutator.ResolveResourceReferences(), mutator.ResolveVariableReferences( From 8957f1e7cf65a7b49f99419f7041779c00b96900 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 24 Jun 2024 12:15:13 +0200 Subject: [PATCH 241/286] Return `fs.ModeDir` for Git folders in the workspace (#1521) ## Changes Not doing this meant file system traversal ended upon reaching a Git folder. By marking these objects as a directory globbing traverses into these folders as well. ## Tests Added a unit test for coverage. --- libs/filer/workspace_files_client.go | 25 ++++++---- libs/filer/workspace_files_client_test.go | 56 +++++++++++++++++++++++ 2 files changed, 71 insertions(+), 10 deletions(-) create mode 100644 libs/filer/workspace_files_client_test.go diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go index 41e35d9d1..09f11b161 100644 --- a/libs/filer/workspace_files_client.go +++ b/libs/filer/workspace_files_client.go @@ -35,6 +35,17 @@ func (entry wsfsDirEntry) Info() (fs.FileInfo, error) { return entry.wsfsFileInfo, nil } +func wsfsDirEntriesFromObjectInfos(objects []workspace.ObjectInfo) []fs.DirEntry { + info := make([]fs.DirEntry, len(objects)) + for i, v := range objects { + info[i] = wsfsDirEntry{wsfsFileInfo{oi: v}} + } + + // Sort by name for parity with os.ReadDir. + sort.Slice(info, func(i, j int) bool { return info[i].Name() < info[j].Name() }) + return info +} + // Type that implements fs.FileInfo for WSFS. 
type wsfsFileInfo struct { oi workspace.ObjectInfo @@ -50,7 +61,7 @@ func (info wsfsFileInfo) Size() int64 { func (info wsfsFileInfo) Mode() fs.FileMode { switch info.oi.ObjectType { - case workspace.ObjectTypeDirectory: + case workspace.ObjectTypeDirectory, workspace.ObjectTypeRepo: return fs.ModeDir default: return fs.ModePerm @@ -62,7 +73,7 @@ func (info wsfsFileInfo) ModTime() time.Time { } func (info wsfsFileInfo) IsDir() bool { - return info.oi.ObjectType == workspace.ObjectTypeDirectory + return info.Mode() == fs.ModeDir } func (info wsfsFileInfo) Sys() any { @@ -262,14 +273,8 @@ func (w *WorkspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.D return nil, err } - info := make([]fs.DirEntry, len(objects)) - for i, v := range objects { - info[i] = wsfsDirEntry{wsfsFileInfo{oi: v}} - } - - // Sort by name for parity with os.ReadDir. - sort.Slice(info, func(i, j int) bool { return info[i].Name() < info[j].Name() }) - return info, nil + // Convert to fs.DirEntry. + return wsfsDirEntriesFromObjectInfos(objects), nil } func (w *WorkspaceFilesClient) Mkdir(ctx context.Context, name string) error { diff --git a/libs/filer/workspace_files_client_test.go b/libs/filer/workspace_files_client_test.go new file mode 100644 index 000000000..4e9537641 --- /dev/null +++ b/libs/filer/workspace_files_client_test.go @@ -0,0 +1,56 @@ +package filer + +import ( + "io/fs" + "testing" + + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWorkspaceFilesDirEntry(t *testing.T) { + entries := wsfsDirEntriesFromObjectInfos([]workspace.ObjectInfo{ + { + Path: "/dir", + ObjectType: workspace.ObjectTypeDirectory, + }, + { + Path: "/file", + ObjectType: workspace.ObjectTypeFile, + Size: 42, + }, + { + Path: "/repo", + ObjectType: workspace.ObjectTypeRepo, + }, + }) + + // Confirm the path is passed through correctly. + assert.Equal(t, "dir", entries[0].Name()) + assert.Equal(t, "file", entries[1].Name()) + assert.Equal(t, "repo", entries[2].Name()) + + // Confirm the type is passed through correctly. + assert.Equal(t, fs.ModeDir, entries[0].Type()) + assert.Equal(t, fs.ModePerm, entries[1].Type()) + assert.Equal(t, fs.ModeDir, entries[2].Type()) + + // Get [fs.FileInfo] from directory entry. + i0, err := entries[0].Info() + require.NoError(t, err) + i1, err := entries[1].Info() + require.NoError(t, err) + i2, err := entries[2].Info() + require.NoError(t, err) + + // Confirm size. + assert.Equal(t, int64(0), i0.Size()) + assert.Equal(t, int64(42), i1.Size()) + assert.Equal(t, int64(0), i2.Size()) + + // Confirm IsDir. + assert.True(t, i0.IsDir()) + assert.False(t, i1.IsDir()) + assert.True(t, i2.IsDir()) +} From 2ec6abf74ebde3752467849f0d8e3093b35fed66 Mon Sep 17 00:00:00 2001 From: Kai Zhu <87322035+kai-zhu-sonatype@users.noreply.github.com> Date: Mon, 24 Jun 2024 06:56:49 -0400 Subject: [PATCH 242/286] Fix `databricks configure` to use DATABRICKS_CONFIG_FILE environment variable if exists as config file (#1325) ## Changes added `ConfigFile: cfg.ConfigFile` for `databrickscfg.SaveToProfile` in `cmd/configure/configure.go` to save the file in a specified path when the value is not empty ## Tests `TestConfigFileFromEnvNoInteractive` in `cmd/configure/configure_test.go` sets a different config file path by `DATABRICKS_CONFIG_FILE`, after execution, the overwrite config file is generated, and the default path has no file. 
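A minimal sketch of what the fix enables (import paths and literal values here are assumptions for illustration, not part of the diff): because `ConfigFile` is now forwarded, a non-empty path, such as one resolved from `DATABRICKS_CONFIG_FILE`, determines where the profile is written; an empty value keeps the default `~/.databrickscfg`.

```go
package main

import (
	"context"
	"os"

	"github.com/databricks/cli/libs/databrickscfg"
	"github.com/databricks/databricks-sdk-go/config"
)

func main() {
	// With the fix, the resolved config file path is passed through, so the
	// profile is saved to $DATABRICKS_CONFIG_FILE when it is set; an empty
	// ConfigFile keeps the previous behaviour of writing to ~/.databrickscfg.
	err := databrickscfg.SaveToProfile(context.Background(), &config.Config{
		Profile:    "DEFAULT",
		Host:       "https://example.cloud.databricks.com",
		Token:      "dapi-example-token",                // placeholder value
		ConfigFile: os.Getenv("DATABRICKS_CONFIG_FILE"), // may be empty
	})
	if err != nil {
		panic(err)
	}
}
```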
--- cmd/configure/configure.go | 9 +++++---- cmd/configure/configure_test.go | 10 +++++++++- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 1e94ddae8..895a5902c 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -139,10 +139,11 @@ The host must be specified with the --host flag or the DATABRICKS_HOST environme // Save profile to config file. return databrickscfg.SaveToProfile(ctx, &config.Config{ - Profile: cfg.Profile, - Host: cfg.Host, - Token: cfg.Token, - ClusterID: cfg.ClusterID, + Profile: cfg.Profile, + Host: cfg.Host, + Token: cfg.Token, + ClusterID: cfg.ClusterID, + ConfigFile: cfg.ConfigFile, }) } diff --git a/cmd/configure/configure_test.go b/cmd/configure/configure_test.go index 259c83adb..a127fe57a 100644 --- a/cmd/configure/configure_test.go +++ b/cmd/configure/configure_test.go @@ -78,7 +78,8 @@ func TestConfigFileFromEnvNoInteractive(t *testing.T) { //TODO: Replace with similar test code from go SDK, once we start using it directly ctx := context.Background() tempHomeDir := setup(t) - cfgPath := filepath.Join(tempHomeDir, ".databrickscfg") + defaultCfgPath := filepath.Join(tempHomeDir, ".databrickscfg") + cfgPath := filepath.Join(tempHomeDir, "overwrite-databricks-cfg") t.Setenv("DATABRICKS_CONFIG_FILE", cfgPath) inp := getTempFileWithContent(t, tempHomeDir, "token\n") @@ -96,6 +97,13 @@ func TestConfigFileFromEnvNoInteractive(t *testing.T) { _, err = os.Stat(cfgPath) assert.NoError(t, err) + _, err = os.Stat(defaultCfgPath) + if runtime.GOOS == "windows" { + assert.ErrorContains(t, err, "cannot find the file specified") + } else { + assert.ErrorContains(t, err, "no such file or directory") + } + cfg, err := ini.Load(cfgPath) assert.NoError(t, err) From 100a0516d43198b6421268b68dc65a61fe45c3e6 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 25 Jun 2024 12:04:22 +0200 Subject: [PATCH 243/286] Add context type and value to path rewriting (#1525) ## Changes For a future change where the inner rewriting functions need access to the underlying bundle, this change makes preparations. All values were passed via the stack before and adding yet another value would make the code less readable. ## Tests Unit tests pass. --- bundle/config/mutator/translate_paths.go | 69 +++++++++++-------- .../mutator/translate_paths_artifacts.go | 28 +++++--- bundle/config/mutator/translate_paths_jobs.go | 63 +++++++++-------- .../mutator/translate_paths_pipelines.go | 38 ++++++---- 4 files changed, 115 insertions(+), 83 deletions(-) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index d9ab9e9e8..4224eafd4 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -33,9 +33,7 @@ func (err ErrIsNotNotebook) Error() string { return fmt.Sprintf("file at %s is not a notebook", err.path) } -type translatePaths struct { - seen map[string]string -} +type translatePaths struct{} // TranslatePaths converts paths to local notebook files into paths in the workspace file system. func TranslatePaths() bundle.Mutator { @@ -48,6 +46,18 @@ func (m *translatePaths) Name() string { type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) (string, error) +// translateContext is a context for rewriting paths in a config. +// It is freshly instantiated on every mutator apply call. +// It provides access to the underlying bundle object such that +// it doesn't have to be passed around explicitly. 
+type translateContext struct { + b *bundle.Bundle + + // seen is a map of local paths to their corresponding remote paths. + // If a local path has already been successfully resolved, we do not need to resolve it again. + seen map[string]string +} + // rewritePath converts a given relative path from the loaded config to a new path based on the passed rewriting function // // It takes these arguments: @@ -57,14 +67,13 @@ type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) ( // This logic is different between regular files or notebooks. // // The function returns an error if it is impossible to rewrite the given relative path. -func (m *translatePaths) rewritePath( +func (t *translateContext) rewritePath( dir string, - b *bundle.Bundle, p *string, fn rewriteFunc, ) error { // We assume absolute paths point to a location in the workspace - if path.IsAbs(filepath.ToSlash(*p)) { + if path.IsAbs(*p) { return nil } @@ -80,13 +89,14 @@ func (m *translatePaths) rewritePath( // Local path is relative to the directory the resource was defined in. localPath := filepath.Join(dir, filepath.FromSlash(*p)) - if interp, ok := m.seen[localPath]; ok { + if interp, ok := t.seen[localPath]; ok { *p = interp return nil } - // Remote path must be relative to the bundle root. - localRelPath, err := filepath.Rel(b.RootPath, localPath) + // Local path must be contained in the bundle root. + // If it isn't, it won't be synchronized into the workspace. + localRelPath, err := filepath.Rel(t.b.RootPath, localPath) if err != nil { return err } @@ -95,20 +105,20 @@ func (m *translatePaths) rewritePath( } // Prefix remote path with its remote root path. - remotePath := path.Join(b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath)) + remotePath := path.Join(t.b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath)) // Convert local path into workspace path via specified function. 
- interp, err := fn(*p, localPath, localRelPath, filepath.ToSlash(remotePath)) + interp, err := fn(*p, localPath, localRelPath, remotePath) if err != nil { return err } *p = interp - m.seen[localPath] = interp + t.seen[localPath] = interp return nil } -func translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { +func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { nb, _, err := notebook.Detect(localFullPath) if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("notebook %s not found", literal) @@ -124,7 +134,7 @@ func translateNotebookPath(literal, localFullPath, localRelPath, remotePath stri return strings.TrimSuffix(remotePath, filepath.Ext(localFullPath)), nil } -func translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { +func (t *translateContext) translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { nb, _, err := notebook.Detect(localFullPath) if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("file %s not found", literal) @@ -138,7 +148,7 @@ func translateFilePath(literal, localFullPath, localRelPath, remotePath string) return remotePath, nil } -func translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { +func (t *translateContext) translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { info, err := os.Stat(localFullPath) if err != nil { return "", err @@ -149,20 +159,20 @@ func translateDirectoryPath(literal, localFullPath, localRelPath, remotePath str return remotePath, nil } -func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) { +func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) { return localRelPath, nil } -func translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) { +func (t *translateContext) translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) { if !strings.HasPrefix(localRelPath, ".") { localRelPath = "." + string(filepath.Separator) + localRelPath } return localRelPath, nil } -func (m *translatePaths) rewriteValue(b *bundle.Bundle, p dyn.Path, v dyn.Value, fn rewriteFunc, dir string) (dyn.Value, error) { +func (t *translateContext) rewriteValue(p dyn.Path, v dyn.Value, fn rewriteFunc, dir string) (dyn.Value, error) { out := v.MustString() - err := m.rewritePath(dir, b, &out, fn) + err := t.rewritePath(dir, &out, fn) if err != nil { if target := (&ErrIsNotebook{}); errors.As(err, target) { return dyn.InvalidValue, fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, p, target) @@ -176,15 +186,15 @@ func (m *translatePaths) rewriteValue(b *bundle.Bundle, p dyn.Path, v dyn.Value, return dyn.NewValue(out, v.Location()), nil } -func (m *translatePaths) rewriteRelativeTo(b *bundle.Bundle, p dyn.Path, v dyn.Value, fn rewriteFunc, dir, fallback string) (dyn.Value, error) { - nv, err := m.rewriteValue(b, p, v, fn, dir) +func (t *translateContext) rewriteRelativeTo(p dyn.Path, v dyn.Value, fn rewriteFunc, dir, fallback string) (dyn.Value, error) { + nv, err := t.rewriteValue(p, v, fn, dir) if err == nil { return nv, nil } // If we failed to rewrite the path, try to rewrite it relative to the fallback directory. 
if fallback != "" { - nv, nerr := m.rewriteValue(b, p, v, fn, fallback) + nv, nerr := t.rewriteValue(p, v, fn, fallback) if nerr == nil { // TODO: Emit a warning that this path should be rewritten. return nv, nil @@ -195,16 +205,19 @@ func (m *translatePaths) rewriteRelativeTo(b *bundle.Bundle, p dyn.Path, v dyn.V } func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { - m.seen = make(map[string]string) + t := &translateContext{ + b: b, + seen: make(map[string]string), + } err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { var err error - for _, fn := range []func(*bundle.Bundle, dyn.Value) (dyn.Value, error){ - m.applyJobTranslations, - m.applyPipelineTranslations, - m.applyArtifactTranslations, + for _, fn := range []func(dyn.Value) (dyn.Value, error){ + t.applyJobTranslations, + t.applyPipelineTranslations, + t.applyArtifactTranslations, } { - v, err = fn(b, v) + v, err = fn(v) if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/config/mutator/translate_paths_artifacts.go b/bundle/config/mutator/translate_paths_artifacts.go index 7bda04eec..921c00c73 100644 --- a/bundle/config/mutator/translate_paths_artifacts.go +++ b/bundle/config/mutator/translate_paths_artifacts.go @@ -3,36 +3,42 @@ package mutator import ( "fmt" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/dyn" ) -func (m *translatePaths) applyArtifactTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { - var err error +type artifactRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc +} +func (t *translateContext) artifactRewritePatterns() []artifactRewritePattern { // Base pattern to match all artifacts. base := dyn.NewPattern( dyn.Key("artifacts"), dyn.AnyKey(), ) - for _, t := range []struct { - pattern dyn.Pattern - fn rewriteFunc - }{ + // Compile list of configuration paths to rewrite. 
+ return []artifactRewritePattern{ { base.Append(dyn.Key("path")), - translateNoOp, + t.translateNoOp, }, - } { - v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + } +} + +func (t *translateContext) applyArtifactTranslations(v dyn.Value) (dyn.Value, error) { + var err error + + for _, rewritePattern := range t.artifactRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { key := p[1].Key() dir, err := v.Location().Directory() if err != nil { return dyn.InvalidValue, fmt.Errorf("unable to determine directory for artifact %s: %w", key, err) } - return m.rewriteRelativeTo(b, p, v, t.fn, dir, "") + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, "") }) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index 58b5e0fb0..60cc8bb9a 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -4,7 +4,6 @@ import ( "fmt" "slices" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/dyn" ) @@ -19,55 +18,42 @@ func noSkipRewrite(string) bool { return false } -func rewritePatterns(base dyn.Pattern) []jobRewritePattern { +func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern { return []jobRewritePattern{ { base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")), - translateNotebookPath, + t.translateNotebookPath, noSkipRewrite, }, { base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")), - translateFilePath, + t.translateFilePath, noSkipRewrite, }, { base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")), - translateDirectoryPath, + t.translateDirectoryPath, noSkipRewrite, }, { base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")), - translateFilePath, + t.translateFilePath, noSkipRewrite, }, { base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")), - translateNoOp, + t.translateNoOp, noSkipRewrite, }, { base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")), - translateNoOp, + t.translateNoOp, noSkipRewrite, }, } } -func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { - fallback, err := gatherFallbackPaths(v, "jobs") - if err != nil { - return dyn.InvalidValue, err - } - - // Do not translate job task paths if using Git source - var ignore []string - for key, job := range b.Config.Resources.Jobs { - if job.GitSource != nil { - ignore = append(ignore, key) - } - } - +func (t *translateContext) jobRewritePatterns() []jobRewritePattern { // Base pattern to match all tasks in all jobs. base := dyn.NewPattern( dyn.Key("resources"), @@ -90,19 +76,38 @@ func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dy dyn.Key("dependencies"), dyn.AnyIndex(), ), - translateNoOpWithPrefix, + t.translateNoOpWithPrefix, func(s string) bool { return !libraries.IsEnvironmentDependencyLocal(s) }, }, } - taskPatterns := rewritePatterns(base) - forEachPatterns := rewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task"))) + + taskPatterns := rewritePatterns(t, base) + forEachPatterns := rewritePatterns(t, base.Append(dyn.Key("for_each_task"), dyn.Key("task"))) allPatterns := append(taskPatterns, jobEnvironmentsPatterns...) allPatterns = append(allPatterns, forEachPatterns...) 
+ return allPatterns +} - for _, t := range allPatterns { - v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { +func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error) { + var err error + + fallback, err := gatherFallbackPaths(v, "jobs") + if err != nil { + return dyn.InvalidValue, err + } + + // Do not translate job task paths if using Git source + var ignore []string + for key, job := range t.b.Config.Resources.Jobs { + if job.GitSource != nil { + ignore = append(ignore, key) + } + } + + for _, rewritePattern := range t.jobRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { key := p[2].Key() // Skip path translation if the job is using git source. @@ -116,10 +121,10 @@ func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dy } sv := v.MustString() - if t.skipRewrite(sv) { + if rewritePattern.skipRewrite(sv) { return v, nil } - return m.rewriteRelativeTo(b, p, v, t.fn, dir, fallback[key]) + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key]) }) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/config/mutator/translate_paths_pipelines.go b/bundle/config/mutator/translate_paths_pipelines.go index 5b2a2c346..71a65e846 100644 --- a/bundle/config/mutator/translate_paths_pipelines.go +++ b/bundle/config/mutator/translate_paths_pipelines.go @@ -3,16 +3,15 @@ package mutator import ( "fmt" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/dyn" ) -func (m *translatePaths) applyPipelineTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { - fallback, err := gatherFallbackPaths(v, "pipelines") - if err != nil { - return dyn.InvalidValue, err - } +type pipelineRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc +} +func (t *translateContext) pipelineRewritePatterns() []pipelineRewritePattern { // Base pattern to match all libraries in all pipelines. base := dyn.NewPattern( dyn.Key("resources"), @@ -22,27 +21,36 @@ func (m *translatePaths) applyPipelineTranslations(b *bundle.Bundle, v dyn.Value dyn.AnyIndex(), ) - for _, t := range []struct { - pattern dyn.Pattern - fn rewriteFunc - }{ + // Compile list of configuration paths to rewrite. 
+ return []pipelineRewritePattern{ { base.Append(dyn.Key("notebook"), dyn.Key("path")), - translateNotebookPath, + t.translateNotebookPath, }, { base.Append(dyn.Key("file"), dyn.Key("path")), - translateFilePath, + t.translateFilePath, }, - } { - v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + } +} + +func (t *translateContext) applyPipelineTranslations(v dyn.Value) (dyn.Value, error) { + var err error + + fallback, err := gatherFallbackPaths(v, "pipelines") + if err != nil { + return dyn.InvalidValue, err + } + + for _, rewritePattern := range t.pipelineRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { key := p[2].Key() dir, err := v.Location().Directory() if err != nil { return dyn.InvalidValue, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) } - return m.rewriteRelativeTo(b, p, v, t.fn, dir, fallback[key]) + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key]) }) if err != nil { return dyn.InvalidValue, err From 8468878eed293c836a7c8a52e69ca38f84417980 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Jun 2024 14:51:17 +0200 Subject: [PATCH 244/286] Bump github.com/databricks/databricks-sdk-go from 0.42.0 to 0.43.0 (#1522) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.42.0 to 0.43.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.43.0

Major Changes and Improvements:

  • Support partners in user agent for SDK (#925).
  • Add serverless_compute_id field to the config (#952).

Other Changes:

  • Generate from latest spec (#944) and (#947).

API Changes:

OpenAPI SHA: 7437dabb9dadee402c1fc060df4c1ce8cc5369f0, Date: 2024-06-25
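The release note above mentions a new `serverless_compute_id` field on the SDK config. As a rough, hedged illustration only (not part of this patch), the snippet below shows how such a knob might be set through the Go SDK's unified config; the Go field name `ServerlessComputeID` and the `"auto"` value are inferred assumptions, not confirmed by this document.

```go
package main

import (
	"log"

	"github.com/databricks/databricks-sdk-go"
)

func main() {
	// Assumption: the serverless_compute_id knob is exposed on the SDK's
	// unified Config struct as ServerlessComputeID; field name and the
	// "auto" value are inferred, not taken from this patch.
	cfg := &databricks.Config{
		Host:                "https://example.cloud.databricks.com",
		ServerlessComputeID: "auto",
	}

	// The config is consumed the usual way when constructing a client.
	w, err := databricks.NewWorkspaceClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = w
}
```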

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.42.0&new-version=0.43.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Pieter Noordhuis --- .codegen/_openapi_sha | 2 +- .codegen/cmds-account.go.tmpl | 4 +- .codegen/cmds-workspace.go.tmpl | 4 +- .codegen/service.go.tmpl | 1 + bundle/schema/docs/bundle_descriptions.json | 121 +++- cmd/workspace/alerts/alerts.go | 44 +- cmd/workspace/apps/apps.go | 59 ++ cmd/workspace/catalogs/catalogs.go | 2 + cmd/workspace/dashboards/dashboards.go | 4 +- cmd/workspace/data-sources/data-sources.go | 14 +- .../external-locations/external-locations.go | 1 + cmd/workspace/functions/functions.go | 2 + cmd/workspace/jobs/jobs.go | 11 +- cmd/workspace/lakeview/lakeview.go | 653 ++++++++++++++++++ cmd/workspace/queries/queries.go | 51 +- .../storage-credentials.go | 1 + .../vector-search-indexes.go | 72 ++ go.mod | 2 +- go.sum | 4 +- 19 files changed, 1005 insertions(+), 47 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index de0f45ab9..c4b47ca14 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -37b925eba37dfb3d7e05b6ba2d458454ce62d3a0 \ No newline at end of file +7437dabb9dadee402c1fc060df4c1ce8cc5369f0 \ No newline at end of file diff --git a/.codegen/cmds-account.go.tmpl b/.codegen/cmds-account.go.tmpl index 24b6bdd7c..43834b698 100644 --- a/.codegen/cmds-account.go.tmpl +++ b/.codegen/cmds-account.go.tmpl @@ -7,7 +7,7 @@ package account import ( "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" - {{range .Services}}{{if and .IsAccounts (not .HasParent)}}{{if not (in $excludes .KebabName) }} + {{range .Services}}{{if and .IsAccounts (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) }} {{.SnakeName}} "github.com/databricks/cli/cmd/account/{{(.TrimPrefix "account").KebabName}}"{{end}}{{end}}{{end}} ) @@ -17,7 +17,7 @@ func New() *cobra.Command { Short: `Databricks Account Commands`, } - {{range .Services}}{{if and .IsAccounts (not .HasParent)}}{{if not (in $excludes .KebabName) -}} + {{range .Services}}{{if and .IsAccounts (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) -}} cmd.AddCommand({{.SnakeName}}.New()) {{end}}{{end}}{{end}} diff --git a/.codegen/cmds-workspace.go.tmpl b/.codegen/cmds-workspace.go.tmpl index 244dde61a..e29f05a55 100644 --- a/.codegen/cmds-workspace.go.tmpl +++ b/.codegen/cmds-workspace.go.tmpl @@ -14,14 +14,14 @@ package workspace import ( "github.com/databricks/cli/cmd/root" - {{range .Services}}{{if and (not .IsAccounts) (not .HasParent)}}{{if not (in $excludes .KebabName) }} + {{range .Services}}{{if and (not .IsAccounts) (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) }} {{.SnakeName}} "github.com/databricks/cli/cmd/workspace/{{.KebabName}}"{{end}}{{end}}{{end}} ) func All() []*cobra.Command { var out []*cobra.Command - {{range .Services}}{{if and (not .IsAccounts) (not .HasParent)}}{{if not (in $excludes .KebabName) -}} + {{range .Services}}{{if and (not .IsAccounts) (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) -}} out = append(out, {{.SnakeName}}.New()) {{end}}{{end}}{{end}} diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index ad482ebe6..111745e4f 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -22,6 +22,7 @@ import ( "dbsql-permissions" "account-access-control-proxy" "files" + "serving-endpoints-data-plane" }} {{if not (in $excludes .KebabName) }} diff --git a/bundle/schema/docs/bundle_descriptions.json 
b/bundle/schema/docs/bundle_descriptions.json index ab948b8b7..380be0545 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -79,6 +79,17 @@ "experimental": { "description": "", "properties": { + "pydabs": { + "description": "", + "properties": { + "enabled": { + "description": "" + }, + "venv_path": { + "description": "" + } + } + }, "python_wheel_wrapper": { "description": "" }, @@ -236,6 +247,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -853,6 +870,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -1595,6 +1618,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -1634,6 +1668,17 @@ "pause_status": { "description": "Whether this trigger is paused or not." }, + "periodic": { + "description": "Periodic trigger settings.", + "properties": { + "interval": { + "description": "The interval at which the trigger should run." 
+ }, + "unit": { + "description": "The unit of time for the interval." + } + } + }, "table": { "description": "Old table trigger settings name. Deprecated in favor of `table_update`.", "properties": { @@ -1712,6 +1757,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -1740,16 +1796,16 @@ "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.", "properties": { "catalog_name": { - "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set." + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled." }, "enabled": { - "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." + "description": "Indicates whether the inference table is enabled." }, "schema_name": { - "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set." + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if the inference table is already enabled." }, "table_name_prefix": { - "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set." + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled." } } }, @@ -2623,7 +2679,7 @@ } }, "notebook": { - "description": "The path to a notebook that defines a pipeline and is stored in the \u003cDatabricks\u003e workspace.\n", + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.\n", "properties": { "path": { "description": "The absolute path of the notebook." @@ -3167,6 +3223,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. 
A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -3784,6 +3846,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -4526,6 +4594,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -4565,6 +4644,17 @@ "pause_status": { "description": "Whether this trigger is paused or not." }, + "periodic": { + "description": "Periodic trigger settings.", + "properties": { + "interval": { + "description": "The interval at which the trigger should run." + }, + "unit": { + "description": "The unit of time for the interval." + } + } + }, "table": { "description": "Old table trigger settings name. Deprecated in favor of `table_update`.", "properties": { @@ -4643,6 +4733,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. 
A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -4671,16 +4772,16 @@ "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.", "properties": { "catalog_name": { - "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set." + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled." }, "enabled": { - "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." + "description": "Indicates whether the inference table is enabled." }, "schema_name": { - "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set." + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if the inference table is already enabled." }, "table_name_prefix": { - "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set." + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled." } } }, @@ -5554,7 +5655,7 @@ } }, "notebook": { - "description": "The path to a notebook that defines a pipeline and is stored in the \u003cDatabricks\u003e workspace.\n", + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.\n", "properties": { "path": { "description": "The absolute path of the notebook." diff --git a/cmd/workspace/alerts/alerts.go b/cmd/workspace/alerts/alerts.go index d4a7d02af..61c1e0eab 100755 --- a/cmd/workspace/alerts/alerts.go +++ b/cmd/workspace/alerts/alerts.go @@ -24,7 +24,12 @@ func New() *cobra.Command { Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the sql_task type of - the Jobs API, e.g. :method:jobs/create.`, + the Jobs API, e.g. :method:jobs/create. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -73,7 +78,12 @@ func newCreate() *cobra.Command { Creates an alert. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies users or notification - destinations if the condition was met.` + destinations if the condition was met. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -131,8 +141,13 @@ func newDelete() *cobra.Command { cmd.Long = `Delete an alert. Deletes an alert. Deleted alerts are no longer accessible and cannot be - restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to - the trash.` + restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + the trash. 
+ + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -199,7 +214,12 @@ func newGet() *cobra.Command { cmd.Short = `Get an alert.` cmd.Long = `Get an alert. - Gets an alert.` + Gets an alert. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -261,7 +281,12 @@ func newList() *cobra.Command { cmd.Short = `Get alerts.` cmd.Long = `Get alerts. - Gets a list of alerts.` + Gets a list of alerts. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -312,7 +337,12 @@ func newUpdate() *cobra.Command { cmd.Short = `Update an alert.` cmd.Long = `Update an alert. - Updates an alert.` + Updates an alert. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 46568e521..1572d4f4b 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -42,6 +42,7 @@ func New() *cobra.Command { cmd.AddCommand(newGetEnvironment()) cmd.AddCommand(newList()) cmd.AddCommand(newListDeployments()) + cmd.AddCommand(newStart()) cmd.AddCommand(newStop()) cmd.AddCommand(newUpdate()) @@ -615,6 +616,64 @@ func newListDeployments() *cobra.Command { return cmd } +// start start command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var startOverrides []func( + *cobra.Command, + *serving.StartAppRequest, +) + +func newStart() *cobra.Command { + cmd := &cobra.Command{} + + var startReq serving.StartAppRequest + + // TODO: short flags + + cmd.Use = "start NAME" + cmd.Short = `Start an app.` + cmd.Long = `Start an app. + + Start the last active deployment of the app in the workspace. + + Arguments: + NAME: The name of the app.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + startReq.Name = args[0] + + response, err := w.Apps.Start(ctx, startReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range startOverrides { + fn(cmd, &startReq) + } + + return cmd +} + // start stop command // Slice with functions to override default command behavior. 
diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 8085b69e2..a17bb0072 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -273,6 +273,8 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include catalogs in the response for which the principal can only access selective metadata for.`) + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of catalogs to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Use = "list" cmd.Short = `List catalogs.` diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 1a143538b..fcab0aa2a 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -268,8 +268,8 @@ func newList() *cobra.Command { Fetch a paginated list of dashboard objects. - ### **Warning: Calling this API concurrently 10 or more times could result in - throttling, service degradation, or a temporary ban.**` + **Warning**: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/data-sources/data-sources.go b/cmd/workspace/data-sources/data-sources.go index 0f0f8541e..f310fe50a 100755 --- a/cmd/workspace/data-sources/data-sources.go +++ b/cmd/workspace/data-sources/data-sources.go @@ -25,7 +25,12 @@ func New() *cobra.Command { This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or grep to search the response from this API for the name of your SQL warehouse - as it appears in Databricks SQL.`, + as it appears in Databricks SQL. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -60,7 +65,12 @@ func newList() *cobra.Command { Retrieves a full list of SQL warehouses available in this workspace. All fields that appear in this API response are enumerated for clarity. However, - you need only a SQL warehouse's id to create new queries against it.` + you need only a SQL warehouse's id to create new queries against it. + + **Note**: A new version of the Databricks SQL API will soon be available. 
+ [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index bd63d3fa4..8f0dd346a 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -348,6 +348,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`) // TODO: complex arg: encryption_details cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`) diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 1aa6daf38..c8de48797 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -69,6 +69,8 @@ func newCreate() *cobra.Command { cmd.Short = `Create a function.` cmd.Long = `Create a function. 
+ **WARNING: This API is experimental and will change in future versions** + Creates a new function The user must have the following permissions in order for the function to be diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index e31c3f086..50a045921 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -1502,24 +1502,15 @@ func newSubmit() *cobra.Command { cmd.Flags().Var(&submitJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: access_control_list - // TODO: complex arg: condition_task - // TODO: complex arg: dbt_task // TODO: complex arg: email_notifications + // TODO: array: environments // TODO: complex arg: git_source // TODO: complex arg: health cmd.Flags().StringVar(&submitReq.IdempotencyToken, "idempotency-token", submitReq.IdempotencyToken, `An optional token that can be used to guarantee the idempotency of job run requests.`) - // TODO: complex arg: notebook_task // TODO: complex arg: notification_settings - // TODO: complex arg: pipeline_task - // TODO: complex arg: python_wheel_task // TODO: complex arg: queue // TODO: complex arg: run_as - // TODO: complex arg: run_job_task cmd.Flags().StringVar(&submitReq.RunName, "run-name", submitReq.RunName, `An optional name for the run.`) - // TODO: complex arg: spark_jar_task - // TODO: complex arg: spark_python_task - // TODO: complex arg: spark_submit_task - // TODO: complex arg: sql_task // TODO: array: tasks cmd.Flags().IntVar(&submitReq.TimeoutSeconds, "timeout-seconds", submitReq.TimeoutSeconds, `An optional timeout applied to each run of this job.`) // TODO: complex arg: webhook_notifications diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 566853ff9..36eab0e7f 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -31,13 +31,23 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateSchedule()) + cmd.AddCommand(newCreateSubscription()) + cmd.AddCommand(newDeleteSchedule()) + cmd.AddCommand(newDeleteSubscription()) cmd.AddCommand(newGet()) cmd.AddCommand(newGetPublished()) + cmd.AddCommand(newGetSchedule()) + cmd.AddCommand(newGetSubscription()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListSchedules()) + cmd.AddCommand(newListSubscriptions()) cmd.AddCommand(newMigrate()) cmd.AddCommand(newPublish()) cmd.AddCommand(newTrash()) cmd.AddCommand(newUnpublish()) cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdateSchedule()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -126,6 +136,277 @@ func newCreate() *cobra.Command { return cmd } +// start create-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createScheduleOverrides []func( + *cobra.Command, + *dashboards.CreateScheduleRequest, +) + +func newCreateSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var createScheduleReq dashboards.CreateScheduleRequest + var createScheduleJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createScheduleReq.DisplayName, "display-name", createScheduleReq.DisplayName, `The display name for schedule.`) + cmd.Flags().Var(&createScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + + cmd.Use = "create-schedule DASHBOARD_ID" + cmd.Short = `Create dashboard schedule.` + cmd.Long = `Create dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createScheduleJson.Unmarshal(&createScheduleReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createScheduleReq.DashboardId = args[0] + + response, err := w.Lakeview.CreateSchedule(ctx, createScheduleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createScheduleOverrides { + fn(cmd, &createScheduleReq) + } + + return cmd +} + +// start create-subscription command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createSubscriptionOverrides []func( + *cobra.Command, + *dashboards.CreateSubscriptionRequest, +) + +func newCreateSubscription() *cobra.Command { + cmd := &cobra.Command{} + + var createSubscriptionReq dashboards.CreateSubscriptionRequest + var createSubscriptionJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createSubscriptionJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-subscription DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Create schedule subscription.` + cmd.Long = `Create schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createSubscriptionJson.Unmarshal(&createSubscriptionReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createSubscriptionReq.DashboardId = args[0] + createSubscriptionReq.ScheduleId = args[1] + + response, err := w.Lakeview.CreateSubscription(ctx, createSubscriptionReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createSubscriptionOverrides { + fn(cmd, &createSubscriptionReq) + } + + return cmd +} + +// start delete-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteScheduleOverrides []func( + *cobra.Command, + *dashboards.DeleteScheduleRequest, +) + +func newDeleteSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var deleteScheduleReq dashboards.DeleteScheduleRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteScheduleReq.Etag, "etag", deleteScheduleReq.Etag, `The etag for the schedule.`) + + cmd.Use = "delete-schedule DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Delete dashboard schedule.` + cmd.Long = `Delete dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteScheduleReq.DashboardId = args[0] + deleteScheduleReq.ScheduleId = args[1] + + err = w.Lakeview.DeleteSchedule(ctx, deleteScheduleReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteScheduleOverrides { + fn(cmd, &deleteScheduleReq) + } + + return cmd +} + +// start delete-subscription command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteSubscriptionOverrides []func( + *cobra.Command, + *dashboards.DeleteSubscriptionRequest, +) + +func newDeleteSubscription() *cobra.Command { + cmd := &cobra.Command{} + + var deleteSubscriptionReq dashboards.DeleteSubscriptionRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteSubscriptionReq.Etag, "etag", deleteSubscriptionReq.Etag, `The etag for the subscription.`) + + cmd.Use = "delete-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID" + cmd.Short = `Delete schedule subscription.` + cmd.Long = `Delete schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. + SUBSCRIPTION_ID: UUID identifying the subscription.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteSubscriptionReq.DashboardId = args[0] + deleteSubscriptionReq.ScheduleId = args[1] + deleteSubscriptionReq.SubscriptionId = args[2] + + err = w.Lakeview.DeleteSubscription(ctx, deleteSubscriptionReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteSubscriptionOverrides { + fn(cmd, &deleteSubscriptionReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. @@ -242,6 +523,303 @@ func newGetPublished() *cobra.Command { return cmd } +// start get-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getScheduleOverrides []func( + *cobra.Command, + *dashboards.GetScheduleRequest, +) + +func newGetSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var getScheduleReq dashboards.GetScheduleRequest + + // TODO: short flags + + cmd.Use = "get-schedule DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Get dashboard schedule.` + cmd.Long = `Get dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getScheduleReq.DashboardId = args[0] + getScheduleReq.ScheduleId = args[1] + + response, err := w.Lakeview.GetSchedule(ctx, getScheduleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range getScheduleOverrides { + fn(cmd, &getScheduleReq) + } + + return cmd +} + +// start get-subscription command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getSubscriptionOverrides []func( + *cobra.Command, + *dashboards.GetSubscriptionRequest, +) + +func newGetSubscription() *cobra.Command { + cmd := &cobra.Command{} + + var getSubscriptionReq dashboards.GetSubscriptionRequest + + // TODO: short flags + + cmd.Use = "get-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID" + cmd.Short = `Get schedule subscription.` + cmd.Long = `Get schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. + SUBSCRIPTION_ID: UUID identifying the subscription.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getSubscriptionReq.DashboardId = args[0] + getSubscriptionReq.ScheduleId = args[1] + getSubscriptionReq.SubscriptionId = args[2] + + response, err := w.Lakeview.GetSubscription(ctx, getSubscriptionReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getSubscriptionOverrides { + fn(cmd, &getSubscriptionReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *dashboards.ListDashboardsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq dashboards.ListDashboardsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The number of dashboards to return per page.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token, received from a previous ListDashboards call.`) + cmd.Flags().BoolVar(&listReq.ShowTrashed, "show-trashed", listReq.ShowTrashed, `The flag to include dashboards located in the trash.`) + cmd.Flags().Var(&listReq.View, "view", `Indicates whether to include all metadata from the dashboard in the response. Supported values: [DASHBOARD_VIEW_BASIC, DASHBOARD_VIEW_FULL]`) + + cmd.Use = "list" + cmd.Short = `List dashboards.` + cmd.Long = `List dashboards.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.Lakeview.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start list-schedules command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSchedulesOverrides []func( + *cobra.Command, + *dashboards.ListSchedulesRequest, +) + +func newListSchedules() *cobra.Command { + cmd := &cobra.Command{} + + var listSchedulesReq dashboards.ListSchedulesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listSchedulesReq.PageSize, "page-size", listSchedulesReq.PageSize, `The number of schedules to return per page.`) + cmd.Flags().StringVar(&listSchedulesReq.PageToken, "page-token", listSchedulesReq.PageToken, `A page token, received from a previous ListSchedules call.`) + + cmd.Use = "list-schedules DASHBOARD_ID" + cmd.Short = `List dashboard schedules.` + cmd.Long = `List dashboard schedules. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listSchedulesReq.DashboardId = args[0] + + response := w.Lakeview.ListSchedules(ctx, listSchedulesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listSchedulesOverrides { + fn(cmd, &listSchedulesReq) + } + + return cmd +} + +// start list-subscriptions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSubscriptionsOverrides []func( + *cobra.Command, + *dashboards.ListSubscriptionsRequest, +) + +func newListSubscriptions() *cobra.Command { + cmd := &cobra.Command{} + + var listSubscriptionsReq dashboards.ListSubscriptionsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listSubscriptionsReq.PageSize, "page-size", listSubscriptionsReq.PageSize, `The number of subscriptions to return per page.`) + cmd.Flags().StringVar(&listSubscriptionsReq.PageToken, "page-token", listSubscriptionsReq.PageToken, `A page token, received from a previous ListSubscriptions call.`) + + cmd.Use = "list-subscriptions DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `List schedule subscriptions.` + cmd.Long = `List schedule subscriptions. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listSubscriptionsReq.DashboardId = args[0] + listSubscriptionsReq.ScheduleId = args[1] + + response := w.Lakeview.ListSubscriptions(ctx, listSubscriptionsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listSubscriptionsOverrides { + fn(cmd, &listSubscriptionsReq) + } + + return cmd +} + // start migrate command // Slice with functions to override default command behavior. @@ -576,4 +1154,79 @@ func newUpdate() *cobra.Command { return cmd } +// start update-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateScheduleOverrides []func( + *cobra.Command, + *dashboards.UpdateScheduleRequest, +) + +func newUpdateSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var updateScheduleReq dashboards.UpdateScheduleRequest + var updateScheduleJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateScheduleReq.DisplayName, "display-name", updateScheduleReq.DisplayName, `The display name for schedule.`) + cmd.Flags().StringVar(&updateScheduleReq.Etag, "etag", updateScheduleReq.Etag, `The etag for the schedule.`) + cmd.Flags().Var(&updateScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + + cmd.Use = "update-schedule DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Update dashboard schedule.` + cmd.Long = `Update dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateScheduleJson.Unmarshal(&updateScheduleReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateScheduleReq.DashboardId = args[0] + updateScheduleReq.ScheduleId = args[1] + + response, err := w.Lakeview.UpdateSchedule(ctx, updateScheduleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateScheduleOverrides { + fn(cmd, &updateScheduleReq) + } + + return cmd +} + // end service Lakeview diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index b96eb7154..650131974 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -23,7 +23,12 @@ func New() *cobra.Command { Long: `These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the - sql_task type of the Jobs API, e.g. :method:jobs/create.`, + sql_task type of the Jobs API, e.g. :method:jobs/create. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -76,7 +81,12 @@ func newCreate() *cobra.Command { available SQL warehouses. Or you can copy the data_source_id from an existing query. - **Note**: You cannot add a visualization until you create the query.` + **Note**: You cannot add a visualization until you create the query. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -135,7 +145,12 @@ func newDelete() *cobra.Command { Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is - deleted after 30 days.` + deleted after 30 days. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -203,7 +218,12 @@ func newGet() *cobra.Command { cmd.Long = `Get a query definition. Retrieve a query object definition along with contextual permissions - information about the currently authenticated user.` + information about the currently authenticated user. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -278,8 +298,13 @@ func newList() *cobra.Command { Gets a list of queries. Optionally, this list can be filtered by a search term. - ### **Warning: Calling this API concurrently 10 or more times could result in - throttling, service degradation, or a temporary ban.**` + **Warning**: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -330,7 +355,12 @@ func newRestore() *cobra.Command { cmd.Long = `Restore a query. Restore a query that has been moved to the trash. A restored query appears in - list views and searches. 
You can use restored queries for alerts.` + list views and searches. You can use restored queries for alerts. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -409,7 +439,12 @@ func newUpdate() *cobra.Command { Modify this query definition. - **Note**: You cannot undo this operation.` + **Note**: You cannot undo this operation. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 325945031..18656a61c 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -366,6 +366,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index dff8176ea..158474770 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -42,6 +42,7 @@ func New() *cobra.Command { cmd.AddCommand(newGetIndex()) cmd.AddCommand(newListIndexes()) cmd.AddCommand(newQueryIndex()) + cmd.AddCommand(newQueryNextPage()) cmd.AddCommand(newScanIndex()) cmd.AddCommand(newSyncIndex()) cmd.AddCommand(newUpsertDataVectorIndex()) @@ -416,6 +417,7 @@ func newQueryIndex() *cobra.Command { cmd.Flags().StringVar(&queryIndexReq.FiltersJson, "filters-json", queryIndexReq.FiltersJson, `JSON string representing query filters.`) cmd.Flags().IntVar(&queryIndexReq.NumResults, "num-results", queryIndexReq.NumResults, `Number of results to return.`) cmd.Flags().StringVar(&queryIndexReq.QueryText, "query-text", queryIndexReq.QueryText, `Query text.`) + cmd.Flags().StringVar(&queryIndexReq.QueryType, "query-type", queryIndexReq.QueryType, `The query type to use.`) // TODO: array: query_vector cmd.Flags().Float64Var(&queryIndexReq.ScoreThreshold, "score-threshold", queryIndexReq.ScoreThreshold, `Threshold for the approximate nearest neighbor search.`) @@ -469,6 +471,76 @@ func newQueryIndex() *cobra.Command { return cmd } +// start query-next-page command + +// Slice with functions to override 
default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var queryNextPageOverrides []func( + *cobra.Command, + *vectorsearch.QueryVectorIndexNextPageRequest, +) + +func newQueryNextPage() *cobra.Command { + cmd := &cobra.Command{} + + var queryNextPageReq vectorsearch.QueryVectorIndexNextPageRequest + var queryNextPageJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&queryNextPageJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&queryNextPageReq.EndpointName, "endpoint-name", queryNextPageReq.EndpointName, `Name of the endpoint.`) + cmd.Flags().StringVar(&queryNextPageReq.PageToken, "page-token", queryNextPageReq.PageToken, `Page token returned from previous QueryVectorIndex or QueryVectorIndexNextPage API.`) + + cmd.Use = "query-next-page INDEX_NAME" + cmd.Short = `Query next page.` + cmd.Long = `Query next page. + + Use next_page_token returned from previous QueryVectorIndex or + QueryVectorIndexNextPage request to fetch next page of results. + + Arguments: + INDEX_NAME: Name of the vector index to query.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = queryNextPageJson.Unmarshal(&queryNextPageReq) + if err != nil { + return err + } + } + queryNextPageReq.IndexName = args[0] + + response, err := w.VectorSearchIndexes.QueryNextPage(ctx, queryNextPageReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range queryNextPageOverrides { + fn(cmd, &queryNextPageReq) + } + + return cmd +} + // start scan-index command // Slice with functions to override default command behavior. 
diff --git a/go.mod b/go.mod
index bcfbae470..2dfbf46cf 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.21
 require (
 	github.com/Masterminds/semver/v3 v3.2.1 // MIT
 	github.com/briandowns/spinner v1.23.1 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.42.0 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.43.0 // Apache 2.0
 	github.com/fatih/color v1.17.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
diff --git a/go.sum b/go.sum
index 0f4f62d90..864b7919b 100644
--- a/go.sum
+++ b/go.sum
@@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.42.0 h1:WKdoqnvb+jvsR9+IYkC3P4BH5eJHRzVOr59y3mCoY+s=
-github.com/databricks/databricks-sdk-go v0.42.0/go.mod h1:a9rr0FOHLL26kOjQjZZVFjIYmRABCbrAWVeundDEVG8=
+github.com/databricks/databricks-sdk-go v0.43.0 h1:x4laolWhYlsQg2t8yWEGyRPZy4/Wv3pKnLEoJfVin7I=
+github.com/databricks/databricks-sdk-go v0.43.0/go.mod h1:a9rr0FOHLL26kOjQjZZVFjIYmRABCbrAWVeundDEVG8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

From dac5f09556875003986832f74829bdbc326e725f Mon Sep 17 00:00:00 2001
From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com>
Date: Tue, 25 Jun 2024 19:10:21 +0530
Subject: [PATCH 245/286] Retain location metadata for values in `convert.FromTyped` (#1523)

## Changes
There are four different treatments location metadata can receive in the `convert.FromTyped` method.

1. Location metadata is **retained** for maps, structs and slices if the value is **not nil**
2. Location metadata is **lost** for maps, structs and slices if the value **is nil**
3. Location metadata is **retained** if a scalar type (e.g. bool, string, etc.) does not change.
4. Location metadata is **lost** if the value for a scalar type changes.

This PR ensures that location metadata is not lost in any case; that is, it's always preserved.

For (2), this serves as a bug fix so that location information is not lost on conversion to and from typed for nil values of complex types (structs, slices, and maps).

For (4), this is a change in semantics. For primitive values modified in a `typed` mutator, any references to `.Location()` for computed primitive fields will now return the associated YAML location metadata (if any) instead of an empty location. While arguable, these semantics are OK since:
1. Situations like these will be rare.
2. Knowing the YAML location (if any) is better than not knowing the location at all. These locations are typically visible to the user in errors and warnings.
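To make the new semantics concrete, here is a minimal sketch of cases (2) and (4), modeled on the unit tests added in this PR. It assumes an external `convert_test` package; the test names and the `databricks.yml` file value are illustrative only.

```go
package convert_test

import (
	"testing"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Case (2): converting a nil map keeps the location of the reference value
// instead of returning a location-less nil.
func TestNilMapKeepsReferenceLocation(t *testing.T) {
	var src map[string]string
	ref := dyn.NewValue(nil, dyn.Location{File: "databricks.yml"})

	nv, err := convert.FromTyped(src, ref)
	require.NoError(t, err)
	assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "databricks.yml"}), nv)
}

// Case (4): a scalar modified by a typed mutator keeps the YAML location of
// the reference instead of an empty location.
func TestModifiedScalarKeepsReferenceLocation(t *testing.T) {
	src := "modified"
	ref := dyn.NewValue("original", dyn.Location{File: "databricks.yml"})

	nv, err := convert.FromTyped(src, ref)
	require.NoError(t, err)
	assert.Equal(t, dyn.NewValue("modified", dyn.Location{File: "databricks.yml"}), nv)
}
```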
## Tests Unit tests --- libs/dyn/convert/from_typed.go | 34 +++++---- libs/dyn/convert/from_typed_test.go | 109 +++++++++++++++++++++------- 2 files changed, 105 insertions(+), 38 deletions(-) diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index af49a07ab..258ade4e8 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -42,7 +42,7 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, // Dereference pointer if necessary for srcv.Kind() == reflect.Pointer { if srcv.IsNil() { - return dyn.NilValue, nil + return dyn.NilValue.WithLocation(ref.Location()), nil } srcv = srcv.Elem() @@ -55,27 +55,35 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, } } + var v dyn.Value + var err error switch srcv.Kind() { case reflect.Struct: - return fromTypedStruct(srcv, ref, options...) + v, err = fromTypedStruct(srcv, ref, options...) case reflect.Map: - return fromTypedMap(srcv, ref) + v, err = fromTypedMap(srcv, ref) case reflect.Slice: - return fromTypedSlice(srcv, ref) + v, err = fromTypedSlice(srcv, ref) case reflect.String: - return fromTypedString(srcv, ref, options...) + v, err = fromTypedString(srcv, ref, options...) case reflect.Bool: - return fromTypedBool(srcv, ref, options...) + v, err = fromTypedBool(srcv, ref, options...) case reflect.Int, reflect.Int32, reflect.Int64: - return fromTypedInt(srcv, ref, options...) + v, err = fromTypedInt(srcv, ref, options...) case reflect.Float32, reflect.Float64: - return fromTypedFloat(srcv, ref, options...) + v, err = fromTypedFloat(srcv, ref, options...) case reflect.Invalid: // If the value is untyped and not set (e.g. any type with nil value), we return nil. - return dyn.NilValue, nil + v, err = dyn.NilValue, nil + default: + return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) } - return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) + // Ensure the location metadata is retained. + if err != nil { + return dyn.InvalidValue, err + } + return v.WithLocation(ref.Location()), err } func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -117,7 +125,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio // 2. The reference is a map (i.e. the struct was and still is empty). // 3. The "includeZeroValues" option is set (i.e. the struct is a non-nil pointer). if out.Len() > 0 || ref.Kind() == dyn.KindMap || slices.Contains(options, includeZeroValues) { - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } // Otherwise, return nil. 
@@ -164,7 +172,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out.Set(refk, nv) } - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { @@ -199,7 +207,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out[i] = nv } - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index e5447fe80..c2c17a57e 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -49,7 +49,7 @@ func TestFromTypedStructPointerZeroFields(t *testing.T) { require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) - // For an initialized pointer with a nil reference we expect a nil. + // For an initialized pointer with a nil reference we expect an empty map. src = &Tmp{} nv, err = FromTyped(src, dyn.NilValue) require.NoError(t, err) @@ -103,7 +103,7 @@ func TestFromTypedStructSetFields(t *testing.T) { }), nv) } -func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedStructSetFieldsRetainLocation(t *testing.T) { type Tmp struct { Foo string `json:"foo"` Bar string `json:"bar"` @@ -122,11 +122,9 @@ func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their location. assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) } func TestFromTypedStringMapWithZeroValue(t *testing.T) { @@ -354,7 +352,7 @@ func TestFromTypedMapNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { var src = map[string]string{ "foo": "bar", "bar": "qux", @@ -368,11 +366,9 @@ func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their locations. assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { @@ -429,7 +425,7 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { var src = []string{ "foo", "bar", @@ -437,17 +433,15 @@ func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { ref := dyn.V([]dyn.Value{ dyn.NewValue("foo", dyn.Location{File: "foo"}), - dyn.NewValue("baz", dyn.Location{File: "baz"}), + dyn.NewValue("bar", dyn.Location{File: "bar"}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their locations. 
assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv.Index(0)) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("bar", dyn.Location{}), nv.Index(1)) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "bar"}), nv.Index(1)) } func TestFromTypedStringEmpty(t *testing.T) { @@ -482,12 +476,20 @@ func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V("new"), nv) } -func TestFromTypedStringRetainsLocationsIfUnchanged(t *testing.T) { - var src string = "foo" +func TestFromTypedStringRetainsLocations(t *testing.T) { var ref = dyn.NewValue("foo", dyn.Location{File: "foo"}) + + // case: value has not been changed + var src string = "foo" nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = "bar" + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv) } func TestFromTypedStringTypeError(t *testing.T) { @@ -529,12 +531,20 @@ func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(true), nv) } -func TestFromTypedBoolRetainsLocationsIfUnchanged(t *testing.T) { - var src bool = true +func TestFromTypedBoolRetainsLocations(t *testing.T) { var ref = dyn.NewValue(true, dyn.Location{File: "foo"}) + + // case: value has not been changed + var src bool = true nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(true, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = false + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(false, dyn.Location{File: "foo"}), nv) } func TestFromTypedBoolVariableReference(t *testing.T) { @@ -584,12 +594,20 @@ func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(int64(1234)), nv) } -func TestFromTypedIntRetainsLocationsIfUnchanged(t *testing.T) { - var src int = 1234 +func TestFromTypedIntRetainsLocations(t *testing.T) { var ref = dyn.NewValue(1234, dyn.Location{File: "foo"}) + + // case: value has not been changed + var src int = 1234 nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(1234, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = 1235 + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(int64(1235), dyn.Location{File: "foo"}), nv) } func TestFromTypedIntVariableReference(t *testing.T) { @@ -639,12 +657,21 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(1.23), nv) } -func TestFromTypedFloatRetainsLocationsIfUnchanged(t *testing.T) { - var src float64 = 1.23 +func TestFromTypedFloatRetainsLocations(t *testing.T) { + var src float64 var ref = dyn.NewValue(1.23, dyn.Location{File: "foo"}) + + // case: value has not been changed + src = 1.23 nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(1.23, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = 1.24 + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(1.24, dyn.Location{File: "foo"}), nv) } func TestFromTypedFloatVariableReference(t *testing.T) { @@ -669,3 +696,35 @@ func TestFromTypedAnyNil(t *testing.T) { require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) } + +func TestFromTypedNilPointerRetainsLocations(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string 
`json:"bar"` + } + + var src *Tmp + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} + +func TestFromTypedNilMapRetainsLocation(t *testing.T) { + var src map[string]string + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} + +func TestFromTypedNilSliceRetainsLocation(t *testing.T) { + var src []string + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} From 482d83cba82bf87b6f9d9d52a04631c792183210 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 26 Jun 2024 11:26:40 +0200 Subject: [PATCH 246/286] Revert "Retain location metadata for values in `convert.FromTyped`" (#1528) ## Changes This reverts commit dac5f09556875003986832f74829bdbc326e725f (#1523). Retaining the location for nil values means equality checks no longer pass. We need #1520 to be merged first. ## Tests Integration test `TestAccPythonWheelTaskDeployAndRunWithWrapper`. --- libs/dyn/convert/from_typed.go | 34 ++++----- libs/dyn/convert/from_typed_test.go | 109 +++++++--------------------- 2 files changed, 38 insertions(+), 105 deletions(-) diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index 258ade4e8..af49a07ab 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -42,7 +42,7 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, // Dereference pointer if necessary for srcv.Kind() == reflect.Pointer { if srcv.IsNil() { - return dyn.NilValue.WithLocation(ref.Location()), nil + return dyn.NilValue, nil } srcv = srcv.Elem() @@ -55,35 +55,27 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, } } - var v dyn.Value - var err error switch srcv.Kind() { case reflect.Struct: - v, err = fromTypedStruct(srcv, ref, options...) + return fromTypedStruct(srcv, ref, options...) case reflect.Map: - v, err = fromTypedMap(srcv, ref) + return fromTypedMap(srcv, ref) case reflect.Slice: - v, err = fromTypedSlice(srcv, ref) + return fromTypedSlice(srcv, ref) case reflect.String: - v, err = fromTypedString(srcv, ref, options...) + return fromTypedString(srcv, ref, options...) case reflect.Bool: - v, err = fromTypedBool(srcv, ref, options...) + return fromTypedBool(srcv, ref, options...) case reflect.Int, reflect.Int32, reflect.Int64: - v, err = fromTypedInt(srcv, ref, options...) + return fromTypedInt(srcv, ref, options...) case reflect.Float32, reflect.Float64: - v, err = fromTypedFloat(srcv, ref, options...) + return fromTypedFloat(srcv, ref, options...) case reflect.Invalid: // If the value is untyped and not set (e.g. any type with nil value), we return nil. - v, err = dyn.NilValue, nil - default: - return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) + return dyn.NilValue, nil } - // Ensure the location metadata is retained. 
- if err != nil { - return dyn.InvalidValue, err - } - return v.WithLocation(ref.Location()), err + return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) } func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -125,7 +117,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio // 2. The reference is a map (i.e. the struct was and still is empty). // 3. The "includeZeroValues" option is set (i.e. the struct is a non-nil pointer). if out.Len() > 0 || ref.Kind() == dyn.KindMap || slices.Contains(options, includeZeroValues) { - return dyn.V(out), nil + return dyn.NewValue(out, ref.Location()), nil } // Otherwise, return nil. @@ -172,7 +164,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out.Set(refk, nv) } - return dyn.V(out), nil + return dyn.NewValue(out, ref.Location()), nil } func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { @@ -207,7 +199,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out[i] = nv } - return dyn.V(out), nil + return dyn.NewValue(out, ref.Location()), nil } func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index c2c17a57e..e5447fe80 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -49,7 +49,7 @@ func TestFromTypedStructPointerZeroFields(t *testing.T) { require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) - // For an initialized pointer with a nil reference we expect an empty map. + // For an initialized pointer with a nil reference we expect a nil. src = &Tmp{} nv, err = FromTyped(src, dyn.NilValue) require.NoError(t, err) @@ -103,7 +103,7 @@ func TestFromTypedStructSetFields(t *testing.T) { }), nv) } -func TestFromTypedStructSetFieldsRetainLocation(t *testing.T) { +func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { type Tmp struct { Foo string `json:"foo"` Bar string `json:"bar"` @@ -122,9 +122,11 @@ func TestFromTypedStructSetFieldsRetainLocation(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo and bar have retained their location. + // Assert foo has retained its location. assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) + + // Assert bar lost its location (because it was overwritten). + assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) } func TestFromTypedStringMapWithZeroValue(t *testing.T) { @@ -352,7 +354,7 @@ func TestFromTypedMapNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { +func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { var src = map[string]string{ "foo": "bar", "bar": "qux", @@ -366,9 +368,11 @@ func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo and bar have retained their locations. + // Assert foo has retained its location. assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) + + // Assert bar lost its location (because it was overwritten). 
+ assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { @@ -425,7 +429,7 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { +func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { var src = []string{ "foo", "bar", @@ -433,15 +437,17 @@ func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { ref := dyn.V([]dyn.Value{ dyn.NewValue("foo", dyn.Location{File: "foo"}), - dyn.NewValue("bar", dyn.Location{File: "bar"}), + dyn.NewValue("baz", dyn.Location{File: "baz"}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo and bar have retained their locations. + // Assert foo has retained its location. assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv.Index(0)) - assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "bar"}), nv.Index(1)) + + // Assert bar lost its location (because it was overwritten). + assert.Equal(t, dyn.NewValue("bar", dyn.Location{}), nv.Index(1)) } func TestFromTypedStringEmpty(t *testing.T) { @@ -476,20 +482,12 @@ func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V("new"), nv) } -func TestFromTypedStringRetainsLocations(t *testing.T) { - var ref = dyn.NewValue("foo", dyn.Location{File: "foo"}) - - // case: value has not been changed +func TestFromTypedStringRetainsLocationsIfUnchanged(t *testing.T) { var src string = "foo" + var ref = dyn.NewValue("foo", dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv) - - // case: value has been changed - src = "bar" - nv, err = FromTyped(src, ref) - require.NoError(t, err) - assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv) } func TestFromTypedStringTypeError(t *testing.T) { @@ -531,20 +529,12 @@ func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(true), nv) } -func TestFromTypedBoolRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(true, dyn.Location{File: "foo"}) - - // case: value has not been changed +func TestFromTypedBoolRetainsLocationsIfUnchanged(t *testing.T) { var src bool = true + var ref = dyn.NewValue(true, dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(true, dyn.Location{File: "foo"}), nv) - - // case: value has been changed - src = false - nv, err = FromTyped(src, ref) - require.NoError(t, err) - assert.Equal(t, dyn.NewValue(false, dyn.Location{File: "foo"}), nv) } func TestFromTypedBoolVariableReference(t *testing.T) { @@ -594,20 +584,12 @@ func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(int64(1234)), nv) } -func TestFromTypedIntRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(1234, dyn.Location{File: "foo"}) - - // case: value has not been changed +func TestFromTypedIntRetainsLocationsIfUnchanged(t *testing.T) { var src int = 1234 + var ref = dyn.NewValue(1234, dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(1234, dyn.Location{File: "foo"}), nv) - - // case: value has been changed - src = 1235 - nv, err = FromTyped(src, ref) - require.NoError(t, err) - assert.Equal(t, dyn.NewValue(int64(1235), dyn.Location{File: "foo"}), nv) } func TestFromTypedIntVariableReference(t *testing.T) { @@ -657,21 +639,12 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { assert.Equal(t, 
dyn.V(1.23), nv) } -func TestFromTypedFloatRetainsLocations(t *testing.T) { - var src float64 +func TestFromTypedFloatRetainsLocationsIfUnchanged(t *testing.T) { + var src float64 = 1.23 var ref = dyn.NewValue(1.23, dyn.Location{File: "foo"}) - - // case: value has not been changed - src = 1.23 nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(1.23, dyn.Location{File: "foo"}), nv) - - // case: value has been changed - src = 1.24 - nv, err = FromTyped(src, ref) - require.NoError(t, err) - assert.Equal(t, dyn.NewValue(1.24, dyn.Location{File: "foo"}), nv) } func TestFromTypedFloatVariableReference(t *testing.T) { @@ -696,35 +669,3 @@ func TestFromTypedAnyNil(t *testing.T) { require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) } - -func TestFromTypedNilPointerRetainsLocations(t *testing.T) { - type Tmp struct { - Foo string `json:"foo"` - Bar string `json:"bar"` - } - - var src *Tmp - ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) - - nv, err := FromTyped(src, ref) - require.NoError(t, err) - assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) -} - -func TestFromTypedNilMapRetainsLocation(t *testing.T) { - var src map[string]string - ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) - - nv, err := FromTyped(src, ref) - require.NoError(t, err) - assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) -} - -func TestFromTypedNilSliceRetainsLocation(t *testing.T) { - var src []string - ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) - - nv, err := FromTyped(src, ref) - require.NoError(t, err) - assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) -} From ce5a3f2ce6d2dd2cc39ed254a557f6bf68c8e9b7 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 26 Jun 2024 11:29:46 +0200 Subject: [PATCH 247/286] Upgrade TF provider to 1.48.0 (#1527) ## Changes This includes a fix for library order not being respected. ## Tests Manually confirmed the fix works in https://github.com/databricks/bundle-examples/pull/29. 
--- bundle/internal/tf/codegen/schema/version.go | 2 +- bundle/internal/tf/schema/config.go | 1 + .../schema/data_source_external_location.go | 1 + bundle/internal/tf/schema/data_source_job.go | 18 ++++++++++++++ .../schema/data_source_storage_credential.go | 1 + bundle/internal/tf/schema/resource_job.go | 24 +++++++++++++++++++ .../tf/schema/resource_online_table.go | 9 +++---- bundle/internal/tf/schema/root.go | 2 +- 8 files changed, 52 insertions(+), 6 deletions(-) diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 9595433a8..a99f15a40 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.47.0" +const ProviderVersion = "1.48.0" diff --git a/bundle/internal/tf/schema/config.go b/bundle/internal/tf/schema/config.go index d24d57339..a2de987ec 100644 --- a/bundle/internal/tf/schema/config.go +++ b/bundle/internal/tf/schema/config.go @@ -28,6 +28,7 @@ type Config struct { Profile string `json:"profile,omitempty"` RateLimit int `json:"rate_limit,omitempty"` RetryTimeoutSeconds int `json:"retry_timeout_seconds,omitempty"` + ServerlessComputeId string `json:"serverless_compute_id,omitempty"` SkipVerify bool `json:"skip_verify,omitempty"` Token string `json:"token,omitempty"` Username string `json:"username,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_external_location.go b/bundle/internal/tf/schema/data_source_external_location.go index 0fea6e529..a3e78cbd3 100644 --- a/bundle/internal/tf/schema/data_source_external_location.go +++ b/bundle/internal/tf/schema/data_source_external_location.go @@ -19,6 +19,7 @@ type DataSourceExternalLocationExternalLocationInfo struct { CreatedBy string `json:"created_by,omitempty"` CredentialId string `json:"credential_id,omitempty"` CredentialName string `json:"credential_name,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name,omitempty"` Owner string `json:"owner,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index d517bbe0f..727848ced 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -26,6 +26,7 @@ type DataSourceJobJobSettingsSettingsEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -500,6 +501,7 @@ type DataSourceJobJobSettingsSettingsTaskEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -529,6 +531,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications struc OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string 
`json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -824,6 +827,10 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSt Id string `json:"id"` } +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -832,6 +839,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications str OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -1163,6 +1171,10 @@ type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart struct { Id string `json:"id"` } +type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -1171,6 +1183,7 @@ type DataSourceJobJobSettingsSettingsTaskWebhookNotifications struct { OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -1236,6 +1249,10 @@ type DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart struct { Id string `json:"id"` } +type DataSourceJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -1244,6 +1261,7 @@ type DataSourceJobJobSettingsSettingsWebhookNotifications struct { OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []DataSourceJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } diff --git 
a/bundle/internal/tf/schema/data_source_storage_credential.go b/bundle/internal/tf/schema/data_source_storage_credential.go index c7045d445..bf58f2726 100644 --- a/bundle/internal/tf/schema/data_source_storage_credential.go +++ b/bundle/internal/tf/schema/data_source_storage_credential.go @@ -36,6 +36,7 @@ type DataSourceStorageCredentialStorageCredentialInfo struct { CreatedAt int `json:"created_at,omitempty"` CreatedBy string `json:"created_by,omitempty"` Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name,omitempty"` Owner string `json:"owner,omitempty"` diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 0950073e2..42b648b0f 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -26,6 +26,7 @@ type ResourceJobEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -573,6 +574,7 @@ type ResourceJobTaskEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -602,6 +604,7 @@ type ResourceJobTaskForEachTaskTaskEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -943,6 +946,10 @@ type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart struct { Id string `json:"id"` } +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -951,6 +958,7 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { OnDurationWarningThresholdExceeded []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -1329,6 +1337,10 @@ type ResourceJobTaskWebhookNotificationsOnStart struct { Id string `json:"id"` } +type ResourceJobTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type ResourceJobTaskWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -1337,6 +1349,7 @@ type ResourceJobTaskWebhookNotifications struct { 
OnDurationWarningThresholdExceeded []ResourceJobTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []ResourceJobTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []ResourceJobTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []ResourceJobTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -1378,6 +1391,11 @@ type ResourceJobTriggerFileArrival struct { WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` } +type ResourceJobTriggerPeriodic struct { + Interval int `json:"interval"` + Unit string `json:"unit"` +} + type ResourceJobTriggerTable struct { Condition string `json:"condition,omitempty"` MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` @@ -1395,6 +1413,7 @@ type ResourceJobTriggerTableUpdate struct { type ResourceJobTrigger struct { PauseStatus string `json:"pause_status,omitempty"` FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"` + Periodic *ResourceJobTriggerPeriodic `json:"periodic,omitempty"` Table *ResourceJobTriggerTable `json:"table,omitempty"` TableUpdate *ResourceJobTriggerTableUpdate `json:"table_update,omitempty"` } @@ -1411,6 +1430,10 @@ type ResourceJobWebhookNotificationsOnStart struct { Id string `json:"id"` } +type ResourceJobWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type ResourceJobWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -1419,6 +1442,7 @@ type ResourceJobWebhookNotifications struct { OnDurationWarningThresholdExceeded []ResourceJobWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []ResourceJobWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []ResourceJobWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []ResourceJobWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_online_table.go b/bundle/internal/tf/schema/resource_online_table.go index af8a348d3..de671eade 100644 --- a/bundle/internal/tf/schema/resource_online_table.go +++ b/bundle/internal/tf/schema/resource_online_table.go @@ -19,8 +19,9 @@ type ResourceOnlineTableSpec struct { } type ResourceOnlineTable struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - Status []any `json:"status,omitempty"` - Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + Status []any `json:"status,omitempty"` + TableServingUrl string `json:"table_serving_url,omitempty"` + Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` } diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 53f892030..39db3ea2f 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.47.0" +const ProviderVersion = "1.48.0" func NewRoot() *Root { return &Root{ From 5f4279160990b2fd4a4d522292e1ff832f307892 Mon Sep 17 00:00:00 2001 
From: Andrew Nester Date: Wed, 26 Jun 2024 12:25:32 +0200 Subject: [PATCH 248/286] Added support for complex variables (#1467) ## Changes Added support for complex variables Now it's possible to add and use complex variables as shown below ``` bundle: name: complex-variables resources: jobs: my_job: job_clusters: - job_cluster_key: key new_cluster: ${var.cluster} tasks: - task_key: test job_cluster_key: key variables: cluster: description: "A cluster definition" type: complex default: spark_version: "13.2.x-scala2.11" node_type_id: "Standard_DS3_v2" num_workers: 2 spark_conf: spark.speculation: true spark.databricks.delta.retentionDurationCheck.enabled: false ``` Fixes #1298 - [x] Support for complex variables - [x] Allow variable overrides (with shortcut) in targets - [x] Don't allow to provide complex variables via flag or env variable - [x] Fail validation if complex value is used but not `type: complex` provided - [x] Support using variables inside complex variables ## Tests Added unit tests --------- Co-authored-by: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> --- .../resolve_resource_references_test.go | 32 ++- .../mutator/resolve_variable_references.go | 55 +++++- .../resolve_variable_references_test.go | 185 +++++++++++++++++- bundle/config/mutator/set_variables.go | 8 +- bundle/config/mutator/set_variables_test.go | 47 +++-- bundle/config/root.go | 22 ++- bundle/config/root_test.go | 21 +- bundle/config/variable/variable.go | 39 +++- bundle/phases/initialize.go | 6 +- bundle/schema/schema_test.go | 96 ++++----- bundle/tests/complex_variables_test.go | 62 ++++++ bundle/tests/variables/complex/databricks.yml | 49 +++++ bundle/tests/variables_test.go | 10 +- libs/dyn/convert/from_typed.go | 22 ++- libs/dyn/convert/from_typed_test.go | 36 ++++ libs/dyn/convert/normalize.go | 27 +++ libs/dyn/convert/normalize_test.go | 140 +++++++++++++ libs/dyn/convert/to_typed.go | 25 +++ libs/dyn/convert/to_typed_test.go | 22 +++ libs/dyn/dynvar/ref.go | 2 +- libs/dyn/dynvar/resolve_test.go | 60 ++++++ 21 files changed, 853 insertions(+), 113 deletions(-) create mode 100644 bundle/tests/complex_variables_test.go create mode 100644 bundle/tests/variables/complex/databricks.yml diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index 214b712e3..86a03b23e 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -35,7 +35,7 @@ func TestResolveClusterReference(t *testing.T) { }, }, "some-variable": { - Value: &justString, + Value: justString, }, }, }, @@ -53,8 +53,8 @@ func TestResolveClusterReference(t *testing.T) { diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, diags.Error()) - require.Equal(t, "1234-5678-abcd", *b.Config.Variables["my-cluster-id-1"].Value) - require.Equal(t, "9876-5432-xywz", *b.Config.Variables["my-cluster-id-2"].Value) + require.Equal(t, "1234-5678-abcd", b.Config.Variables["my-cluster-id-1"].Value) + require.Equal(t, "9876-5432-xywz", b.Config.Variables["my-cluster-id-2"].Value) } func TestResolveNonExistentClusterReference(t *testing.T) { @@ -69,7 +69,7 @@ func TestResolveNonExistentClusterReference(t *testing.T) { }, }, "some-variable": { - Value: &justString, + Value: justString, }, }, }, @@ -105,7 +105,7 @@ func TestNoLookupIfVariableIsSet(t *testing.T) { diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, 
diags.Error()) - require.Equal(t, "random value", *b.Config.Variables["my-cluster-id"].Value) + require.Equal(t, "random value", b.Config.Variables["my-cluster-id"].Value) } func TestResolveServicePrincipal(t *testing.T) { @@ -132,14 +132,11 @@ func TestResolveServicePrincipal(t *testing.T) { diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, diags.Error()) - require.Equal(t, "app-1234", *b.Config.Variables["my-sp"].Value) + require.Equal(t, "app-1234", b.Config.Variables["my-sp"].Value) } func TestResolveVariableReferencesInVariableLookups(t *testing.T) { - s := func(s string) *string { - return &s - } - + s := "bar" b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ @@ -147,7 +144,7 @@ func TestResolveVariableReferencesInVariableLookups(t *testing.T) { }, Variables: map[string]*variable.Variable{ "foo": { - Value: s("bar"), + Value: s, }, "lookup": { Lookup: &variable.Lookup{ @@ -168,7 +165,7 @@ func TestResolveVariableReferencesInVariableLookups(t *testing.T) { diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences())) require.NoError(t, diags.Error()) require.Equal(t, "cluster-bar-dev", b.Config.Variables["lookup"].Lookup.Cluster) - require.Equal(t, "1234-5678-abcd", *b.Config.Variables["lookup"].Value) + require.Equal(t, "1234-5678-abcd", b.Config.Variables["lookup"].Value) } func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) { @@ -197,22 +194,15 @@ func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) { } func TestNoResolveLookupIfVariableSetWithEnvVariable(t *testing.T) { - s := func(s string) *string { - return &s - } - b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ Target: "dev", }, Variables: map[string]*variable.Variable{ - "foo": { - Value: s("bar"), - }, "lookup": { Lookup: &variable.Lookup{ - Cluster: "cluster-${var.foo}-${bundle.target}", + Cluster: "cluster-${bundle.target}", }, }, }, @@ -227,5 +217,5 @@ func TestNoResolveLookupIfVariableSetWithEnvVariable(t *testing.T) { diags := bundle.Apply(ctx, b, bundle.Seq(SetVariables(), ResolveVariableReferencesInLookup(), ResolveResourceReferences())) require.NoError(t, diags.Error()) - require.Equal(t, "1234-5678-abcd", *b.Config.Variables["lookup"].Value) + require.Equal(t, "1234-5678-abcd", b.Config.Variables["lookup"].Value) } diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index f7fce6c82..cddc85cba 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -17,6 +17,7 @@ type resolveVariableReferences struct { prefixes []string pattern dyn.Pattern lookupFn func(dyn.Value, dyn.Path) (dyn.Value, error) + skipFn func(dyn.Value) bool } func ResolveVariableReferences(prefixes ...string) bundle.Mutator { @@ -31,6 +32,18 @@ func ResolveVariableReferencesInLookup() bundle.Mutator { }, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("lookup")), lookupFn: lookupForVariables} } +func ResolveVariableReferencesInComplexVariables() bundle.Mutator { + return &resolveVariableReferences{prefixes: []string{ + "bundle", + "workspace", + "variables", + }, + pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("value")), + lookupFn: lookupForComplexVariables, + skipFn: skipResolvingInNonComplexVariables, + } +} + func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) { // Future opportunity: 
if we lookup this path in both the given root // and the synthesized root, we know if it was explicitly set or implied to be empty. @@ -38,6 +51,34 @@ func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) { return dyn.GetByPath(v, path) } +func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { + if path[0].Key() != "variables" { + return lookup(v, path) + } + + varV, err := dyn.GetByPath(v, path[:len(path)-1]) + if err != nil { + return dyn.InvalidValue, err + } + + var vv variable.Variable + err = convert.ToTyped(&vv, varV) + if err != nil { + return dyn.InvalidValue, err + } + + if vv.Type == variable.VariableTypeComplex { + return dyn.InvalidValue, fmt.Errorf("complex variables cannot contain references to another complex variables") + } + + return lookup(v, path) +} + +func skipResolvingInNonComplexVariables(v dyn.Value) bool { + _, ok := v.AsMap() + return !ok +} + func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { if path[0].Key() != "variables" { return lookup(v, path) @@ -100,17 +141,27 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) // Resolve variable references in all values. return dynvar.Resolve(v, func(path dyn.Path) (dyn.Value, error) { // Rewrite the shorthand path ${var.foo} into ${variables.foo.value}. - if path.HasPrefix(varPath) && len(path) == 2 { - path = dyn.NewPath( + if path.HasPrefix(varPath) { + newPath := dyn.NewPath( dyn.Key("variables"), path[1], dyn.Key("value"), ) + + if len(path) > 2 { + newPath = newPath.Append(path[2:]...) + } + + path = newPath } // Perform resolution only if the path starts with one of the specified prefixes. for _, prefix := range prefixes { if path.HasPrefix(prefix) { + // Skip resolution if there is a skip function and it returns true. 
+ if m.skipFn != nil && m.skipFn(v) { + return dyn.InvalidValue, dynvar.ErrSkipResolution + } return m.lookupFn(normalized, path) } } diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go index 651ea3d2c..2b88a2495 100644 --- a/bundle/config/mutator/resolve_variable_references_test.go +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -43,10 +43,6 @@ func TestResolveVariableReferences(t *testing.T) { } func TestResolveVariableReferencesToBundleVariables(t *testing.T) { - s := func(s string) *string { - return &s - } - b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ @@ -57,7 +53,7 @@ func TestResolveVariableReferencesToBundleVariables(t *testing.T) { }, Variables: map[string]*variable.Variable{ "foo": { - Value: s("bar"), + Value: "bar", }, }, }, @@ -195,3 +191,182 @@ func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { assert.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MaxWorkers) assert.Equal(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice) } + +func TestResolveComplexVariable(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. + diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("new_cluster")), dyn.V("${var.cluster}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables")) + require.NoError(t, diags.Error()) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NumWorkers) +} + +func TestResolveComplexVariableReferencesToFields(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. 
+ diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0].new_cluster") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("node_type_id")), dyn.V("${var.cluster.node_type_id}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables")) + require.NoError(t, diags.Error()) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId) +} + +func TestResolveComplexVariableReferencesWithComplexVariablesError(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "spark_conf": "${var.spark_conf}", + }, + Type: variable.VariableTypeComplex, + }, + "spark_conf": { + Value: map[string]any{ + "spark.executor.memory": "4g", + "spark.executor.cores": "2", + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. + diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("new_cluster")), dyn.V("${var.cluster}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, bundle.Seq(ResolveVariableReferencesInComplexVariables(), ResolveVariableReferences("bundle", "workspace", "variables"))) + require.ErrorContains(t, diags.Error(), "complex variables cannot contain references to another complex variables") +} diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index 0cee24ab6..b3a9cf400 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -30,6 +30,10 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Di // case: read and set variable value from process environment envVarName := bundleVarPrefix + name if val, ok := env.Lookup(ctx, envVarName); ok { + if v.IsComplex() { + return diag.Errorf(`setting via environment variables (%s) is not supported for complex variable %s`, envVarName, name) + } + err := v.Set(val) if err != nil { return diag.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err) @@ -45,9 +49,9 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Di // case: Set the variable to its default value if v.HasDefault() { - err := v.Set(*v.Default) + err := v.Set(v.Default) if err != nil { - return diag.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, *v.Default, name, err) + 
return diag.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, v.Default, name, err) } return nil } diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index ae4f79896..65dedee97 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -15,7 +15,7 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { defaultVal := "default" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, + Default: defaultVal, } // set value for variable as an environment variable @@ -23,19 +23,19 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { diags := setVariable(context.Background(), &variable, "foo") require.NoError(t, diags.Error()) - assert.Equal(t, *variable.Value, "process-env") + assert.Equal(t, variable.Value, "process-env") } func TestSetVariableUsingDefaultValue(t *testing.T) { defaultVal := "default" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, + Default: defaultVal, } diags := setVariable(context.Background(), &variable, "foo") require.NoError(t, diags.Error()) - assert.Equal(t, *variable.Value, "default") + assert.Equal(t, variable.Value, "default") } func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { @@ -43,15 +43,15 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { val := "assigned-value" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, - Value: &val, + Default: defaultVal, + Value: val, } // since a value is already assigned to the variable, it would not be overridden // by the default value diags := setVariable(context.Background(), &variable, "foo") require.NoError(t, diags.Error()) - assert.Equal(t, *variable.Value, "assigned-value") + assert.Equal(t, variable.Value, "assigned-value") } func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { @@ -59,8 +59,8 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { val := "assigned-value" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, - Value: &val, + Default: defaultVal, + Value: val, } // set value for variable as an environment variable @@ -70,7 +70,7 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { // by the value from environment diags := setVariable(context.Background(), &variable, "foo") require.NoError(t, diags.Error()) - assert.Equal(t, *variable.Value, "assigned-value") + assert.Equal(t, variable.Value, "assigned-value") } func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) { @@ -92,15 +92,15 @@ func TestSetVariablesMutator(t *testing.T) { Variables: map[string]*variable.Variable{ "a": { Description: "resolved to default value", - Default: &defaultValForA, + Default: defaultValForA, }, "b": { Description: "resolved from environment vairables", - Default: &defaultValForB, + Default: defaultValForB, }, "c": { Description: "has already been assigned a value", - Value: &valForC, + Value: valForC, }, }, }, @@ -110,7 +110,22 @@ func TestSetVariablesMutator(t *testing.T) { diags := bundle.Apply(context.Background(), b, SetVariables()) require.NoError(t, diags.Error()) - assert.Equal(t, "default-a", *b.Config.Variables["a"].Value) - assert.Equal(t, "env-var-b", *b.Config.Variables["b"].Value) - assert.Equal(t, "assigned-val-c", *b.Config.Variables["c"].Value) + assert.Equal(t, "default-a", b.Config.Variables["a"].Value) + assert.Equal(t, 
"env-var-b", b.Config.Variables["b"].Value) + assert.Equal(t, "assigned-val-c", b.Config.Variables["c"].Value) +} + +func TestSetComplexVariablesViaEnvVariablesIsNotAllowed(t *testing.T) { + defaultVal := "default" + variable := variable.Variable{ + Description: "a test variable", + Default: defaultVal, + Type: variable.VariableTypeComplex, + } + + // set value for variable as an environment variable + t.Setenv("BUNDLE_VAR_foo", "process-env") + + diags := setVariable(context.Background(), &variable, "foo") + assert.ErrorContains(t, diags.Error(), "setting via environment variables (BUNDLE_VAR_foo) is not supported for complex variable foo") } diff --git a/bundle/config/root.go b/bundle/config/root.go index 2ce3a1389..0def1167b 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -267,6 +267,11 @@ func (r *Root) InitializeVariables(vars []string) error { if _, ok := r.Variables[name]; !ok { return fmt.Errorf("variable %s has not been defined", name) } + + if r.Variables[name].IsComplex() { + return fmt.Errorf("setting variables of complex type via --var flag is not supported: %s", name) + } + err := r.Variables[name].Set(val) if err != nil { return fmt.Errorf("failed to assign %s to %s: %s", val, name, err) @@ -419,7 +424,7 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { } // For each variable, normalize its contents if it is a single string. - return dyn.Map(target, "variables", dyn.Foreach(func(_ dyn.Path, variable dyn.Value) (dyn.Value, error) { + return dyn.Map(target, "variables", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) { switch variable.Kind() { case dyn.KindString, dyn.KindBool, dyn.KindFloat, dyn.KindInt: @@ -430,6 +435,21 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { "default": variable, }, variable.Location()), nil + case dyn.KindMap, dyn.KindSequence: + // Check if the original definition of variable has a type field. 
+ typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type"))) + if err != nil { + return variable, nil + } + + if typeV.MustString() == "complex" { + return dyn.NewValue(map[string]dyn.Value{ + "default": variable, + }, variable.Location()), nil + } + + return variable, nil + default: return variable, nil } diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index b56768848..27cc3d22b 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -51,7 +51,7 @@ func TestInitializeVariables(t *testing.T) { root := &Root{ Variables: map[string]*variable.Variable{ "foo": { - Default: &fooDefault, + Default: fooDefault, Description: "an optional variable since default is defined", }, "bar": { @@ -62,8 +62,8 @@ func TestInitializeVariables(t *testing.T) { err := root.InitializeVariables([]string{"foo=123", "bar=456"}) assert.NoError(t, err) - assert.Equal(t, "123", *(root.Variables["foo"].Value)) - assert.Equal(t, "456", *(root.Variables["bar"].Value)) + assert.Equal(t, "123", (root.Variables["foo"].Value)) + assert.Equal(t, "456", (root.Variables["bar"].Value)) } func TestInitializeVariablesWithAnEqualSignInValue(t *testing.T) { @@ -77,7 +77,7 @@ func TestInitializeVariablesWithAnEqualSignInValue(t *testing.T) { err := root.InitializeVariables([]string{"foo=123=567"}) assert.NoError(t, err) - assert.Equal(t, "123=567", *(root.Variables["foo"].Value)) + assert.Equal(t, "123=567", (root.Variables["foo"].Value)) } func TestInitializeVariablesInvalidFormat(t *testing.T) { @@ -119,3 +119,16 @@ func TestRootMergeTargetOverridesWithMode(t *testing.T) { require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, Development, root.Bundle.Mode) } + +func TestInitializeComplexVariablesViaFlagIsNotAllowed(t *testing.T) { + root := &Root{ + Variables: map[string]*variable.Variable{ + "foo": { + Type: variable.VariableTypeComplex, + }, + }, + } + + err := root.InitializeVariables([]string{"foo=123"}) + assert.ErrorContains(t, err, "setting variables of complex type via --var flag is not supported: foo") +} diff --git a/bundle/config/variable/variable.go b/bundle/config/variable/variable.go index 5e700a9b0..ba94f9c8a 100644 --- a/bundle/config/variable/variable.go +++ b/bundle/config/variable/variable.go @@ -2,12 +2,27 @@ package variable import ( "fmt" + "reflect" +) + +// We are using `any` because since introduction of complex variables, +// variables can be of any type. +// Type alias is used to make it easier to understand the code. +type VariableValue = any + +type VariableType string + +const ( + VariableTypeComplex VariableType = "complex" ) // An input variable for the bundle config type Variable struct { + // A type of the variable. This is used to validate the value of the variable + Type VariableType `json:"type,omitempty"` + // A default value which then makes the variable optional - Default *string `json:"default,omitempty"` + Default VariableValue `json:"default,omitempty"` // Documentation for this input variable Description string `json:"description,omitempty"` @@ -21,7 +36,7 @@ type Variable struct { // 4. Default value defined in variable definition // 5. Throw error, since if no default value is defined, then the variable // is required - Value *string `json:"value,omitempty" bundle:"readonly"` + Value VariableValue `json:"value,omitempty" bundle:"readonly"` // The value of this field will be used to lookup the resource by name // And assign the value of the variable to ID of the resource found. 
@@ -39,10 +54,24 @@ func (v *Variable) HasValue() bool { return v.Value != nil } -func (v *Variable) Set(val string) error { +func (v *Variable) Set(val VariableValue) error { if v.HasValue() { - return fmt.Errorf("variable has already been assigned value: %s", *v.Value) + return fmt.Errorf("variable has already been assigned value: %s", v.Value) } - v.Value = &val + + rv := reflect.ValueOf(val) + switch rv.Kind() { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + if v.Type != VariableTypeComplex { + return fmt.Errorf("variable type is not complex") + } + } + + v.Value = val + return nil } + +func (v *Variable) IsComplex() bool { + return v.Type == VariableTypeComplex +} diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index d96ee0ebf..79fca9df6 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -29,11 +29,13 @@ func Initialize() bundle.Mutator { mutator.ExpandWorkspaceRoot(), mutator.DefineDefaultWorkspacePaths(), mutator.SetVariables(), - // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences - // and ResolveVariableReferences. See what is expected in PythonMutatorPhaseInit doc + // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences, + // ResolveVariableReferencesInComplexVariables and ResolveVariableReferences. + // See what is expected in PythonMutatorPhaseInit doc pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseInit), mutator.ResolveVariableReferencesInLookup(), mutator.ResolveResourceReferences(), + mutator.ResolveVariableReferencesInComplexVariables(), mutator.ResolveVariableReferences( "bundle", "workspace", diff --git a/bundle/schema/schema_test.go b/bundle/schema/schema_test.go index ea4fd1020..6d9df0cc7 100644 --- a/bundle/schema/schema_test.go +++ b/bundle/schema/schema_test.go @@ -20,7 +20,7 @@ func TestIntSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }` @@ -47,7 +47,7 @@ func TestBooleanSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }` @@ -123,7 +123,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -134,7 +134,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -145,7 +145,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -156,7 +156,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": 
"\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -167,7 +167,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -178,7 +178,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -189,7 +189,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -200,7 +200,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -214,7 +214,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -225,7 +225,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -236,7 +236,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -247,7 +247,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -258,7 +258,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -326,7 +326,7 @@ func TestStructOfStructsSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -391,7 +391,7 @@ func TestStructOfMapsSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": 
"\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -481,7 +481,7 @@ func TestMapOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -518,7 +518,7 @@ func TestMapOfStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -556,7 +556,7 @@ func TestMapOfMapSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -661,7 +661,7 @@ func TestSliceOfMapSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -699,7 +699,7 @@ func TestSliceOfStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -757,7 +757,7 @@ func TestEmbeddedStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -797,7 +797,7 @@ func TestEmbeddedStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -892,7 +892,7 @@ func TestNonAnnotatedFieldsAreSkipped(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -934,7 +934,7 @@ func TestDashFieldsAreSkipped(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -987,7 +987,7 @@ func TestPointerInStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1004,7 +1004,7 @@ func TestPointerInStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1018,7 +1018,7 @@ func TestPointerInStructSchema(t 
*testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1035,7 +1035,7 @@ func TestPointerInStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1106,7 +1106,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1129,7 +1129,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1157,7 +1157,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1180,7 +1180,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1210,7 +1210,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1236,7 +1236,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1322,7 +1322,7 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1333,7 +1333,7 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1347,7 +1347,7 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1429,7 +1429,7 @@ func TestDocIngestionForObject(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": 
"\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1512,7 +1512,7 @@ func TestDocIngestionForSlice(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1524,7 +1524,7 @@ func TestDocIngestionForSlice(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1611,7 +1611,7 @@ func TestDocIngestionForMap(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1623,7 +1623,7 @@ func TestDocIngestionForMap(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1683,7 +1683,7 @@ func TestDocIngestionForTopLevelPrimitive(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1761,7 +1761,7 @@ func TestInterfaceGeneratesEmptySchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1810,7 +1810,7 @@ func TestBundleReadOnlytag(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1870,7 +1870,7 @@ func TestBundleInternalTag(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, diff --git a/bundle/tests/complex_variables_test.go b/bundle/tests/complex_variables_test.go new file mode 100644 index 000000000..ffe80e418 --- /dev/null +++ b/bundle/tests/complex_variables_test.go @@ -0,0 +1,62 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/require" +) + +func TestComplexVariables(t *testing.T) { + b, diags := loadTargetWithDiags("variables/complex", "default") + require.Empty(t, diags) + + diags = bundle.Apply(context.Background(), b, bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferencesInComplexVariables(), + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) + + require.Equal(t, "13.2.x-scala2.11", 
b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, 2, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) + require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) + + require.Equal(t, 3, len(b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries)) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Jar: "/path/to/jar", + }) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Egg: "/path/to/egg", + }) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Whl: "/path/to/whl", + }) + + require.Equal(t, "task with spark version 13.2.x-scala2.11 and jar /path/to/jar", b.Config.Resources.Jobs["my_job"].Tasks[0].TaskKey) +} + +func TestComplexVariablesOverride(t *testing.T) { + b, diags := loadTargetWithDiags("variables/complex", "dev") + require.Empty(t, diags) + + diags = bundle.Apply(context.Background(), b, bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferencesInComplexVariables(), + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) + + require.Equal(t, "14.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) + require.Equal(t, "Standard_DS3_v3", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, 4, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) + require.Equal(t, "false", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) +} diff --git a/bundle/tests/variables/complex/databricks.yml b/bundle/tests/variables/complex/databricks.yml new file mode 100644 index 000000000..f7535ad4b --- /dev/null +++ b/bundle/tests/variables/complex/databricks.yml @@ -0,0 +1,49 @@ +bundle: + name: complex-variables + +resources: + jobs: + my_job: + job_clusters: + - job_cluster_key: key + new_cluster: ${var.cluster} + tasks: + - task_key: test + job_cluster_key: key + libraries: ${variables.libraries.value} + task_key: "task with spark version ${var.cluster.spark_version} and jar ${var.libraries[0].jar}" + +variables: + node_type: + default: "Standard_DS3_v2" + cluster: + type: complex + description: "A cluster definition" + default: + spark_version: "13.2.x-scala2.11" + node_type_id: ${var.node_type} + num_workers: 2 + spark_conf: + spark.speculation: true + spark.databricks.delta.retentionDurationCheck.enabled: false + libraries: + type: complex + description: "A libraries definition" + default: + - jar: "/path/to/jar" + - egg: "/path/to/egg" + - whl: "/path/to/whl" + + +targets: + default: + dev: + variables: + node_type: "Standard_DS3_v3" + cluster: + spark_version: "14.2.x-scala2.11" + node_type_id: ${var.node_type} + num_workers: 4 + spark_conf: + spark.speculation: false + spark.databricks.delta.retentionDurationCheck.enabled: false diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 09441483b..7cf0f72f0 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -109,8 +109,8 @@ func TestVariablesWithoutDefinition(t *testing.T) { require.NoError(t, diags.Error()) require.True(t, b.Config.Variables["a"].HasValue()) require.True(t, b.Config.Variables["b"].HasValue()) - assert.Equal(t, "foo", 
*b.Config.Variables["a"].Value) - assert.Equal(t, "bar", *b.Config.Variables["b"].Value) + assert.Equal(t, "foo", b.Config.Variables["a"].Value) + assert.Equal(t, "bar", b.Config.Variables["b"].Value) } func TestVariablesWithTargetLookupOverrides(t *testing.T) { @@ -140,9 +140,9 @@ func TestVariablesWithTargetLookupOverrides(t *testing.T) { )) require.NoError(t, diags.Error()) - assert.Equal(t, "4321", *b.Config.Variables["d"].Value) - assert.Equal(t, "1234", *b.Config.Variables["e"].Value) - assert.Equal(t, "9876", *b.Config.Variables["f"].Value) + assert.Equal(t, "4321", b.Config.Variables["d"].Value) + assert.Equal(t, "1234", b.Config.Variables["e"].Value) + assert.Equal(t, "9876", b.Config.Variables["f"].Value) } func TestVariableTargetOverrides(t *testing.T) { diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index af49a07ab..15c5b7978 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -81,6 +81,11 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } case dyn.KindMap, dyn.KindNil: default: return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) @@ -100,8 +105,13 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio refv = dyn.NilValue } + var options []fromTypedOptions + if v.Kind() == reflect.Interface { + options = append(options, includeZeroValues) + } + // Convert the field taking into account the reference value (may be equal to config.NilValue). - nv, err := fromTyped(v.Interface(), refv) + nv, err := fromTyped(v.Interface(), refv, options...) if err != nil { return dyn.InvalidValue, err } @@ -127,6 +137,11 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } case dyn.KindMap, dyn.KindNil: default: return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) @@ -170,6 +185,11 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). 
+ if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } case dyn.KindSequence, dyn.KindNil: default: return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index e5447fe80..ed0c11ca4 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -662,6 +662,42 @@ func TestFromTypedFloatTypeError(t *testing.T) { require.Error(t, err) } +func TestFromTypedAny(t *testing.T) { + type Tmp struct { + Foo any `json:"foo"` + Bar any `json:"bar"` + Foz any `json:"foz"` + Baz any `json:"baz"` + } + + src := Tmp{ + Foo: "foo", + Bar: false, + Foz: 0, + Baz: map[string]any{ + "foo": "foo", + "bar": 1234, + "qux": 0, + "nil": nil, + }, + } + + ref := dyn.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.V("foo"), + "bar": dyn.V(false), + "foz": dyn.V(int64(0)), + "baz": dyn.V(map[string]dyn.Value{ + "foo": dyn.V("foo"), + "bar": dyn.V(int64(1234)), + "qux": dyn.V(int64(0)), + "nil": dyn.V(nil), + }), + }), nv) +} + func TestFromTypedAnyNil(t *testing.T) { var src any = nil var ref = dyn.NilValue diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 35d4d8210..ad82e20ef 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -56,6 +56,8 @@ func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value, seen [] return n.normalizeInt(typ, src, path) case reflect.Float32, reflect.Float64: return n.normalizeFloat(typ, src, path) + case reflect.Interface: + return n.normalizeInterface(typ, src, path) } return dyn.InvalidValue, diag.Errorf("unsupported type: %s", typ.Kind()) @@ -166,8 +168,15 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen return dyn.NewValue(out, src.Location()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } + // Cannot interpret as a struct. return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src, path)) } @@ -197,8 +206,15 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r return dyn.NewValue(out, src.Location()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } + // Cannot interpret as a map. return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src, path)) } @@ -225,8 +241,15 @@ func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen [ return dyn.NewValue(out, src.Location()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } + // Cannot interpret as a slice. 
return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindSequence, src, path)) } @@ -371,3 +394,7 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path d return dyn.NewValue(out, src.Location()), diags } + +func (n normalizeOptions) normalizeInterface(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { + return src, nil +} diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 843b4ea59..299ffcabd 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -223,6 +223,52 @@ func TestNormalizeStructIncludeMissingFieldsOnRecursiveType(t *testing.T) { }), vout) } +func TestNormalizeStructVariableReference(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeStructRandomStringError(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue("var foo", dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeStructIntError(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + func TestNormalizeMap(t *testing.T) { var typ map[string]string vin := dyn.V(map[string]dyn.Value{ @@ -312,6 +358,40 @@ func TestNormalizeMapNestedError(t *testing.T) { ) } +func TestNormalizeMapVariableReference(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeMapRandomStringError(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue("var foo", dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeMapIntError(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + func TestNormalizeSlice(t *testing.T) { var typ []string vin := dyn.V([]dyn.Value{ @@ -400,6 +480,40 @@ func TestNormalizeSliceNestedError(t *testing.T) { ) } +func TestNormalizeSliceVariableReference(t *testing.T) { + var typ []string + vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeSliceRandomStringError(t *testing.T) { + var typ []string + vin := dyn.NewValue("var 
foo", dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected sequence, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeSliceIntError(t *testing.T) { + var typ []string + vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected sequence, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + func TestNormalizeString(t *testing.T) { var typ string vin := dyn.V("string") @@ -725,3 +839,29 @@ func TestNormalizeAnchors(t *testing.T) { "foo": "bar", }, vout.AsAny()) } + +func TestNormalizeBoolToAny(t *testing.T) { + var typ any + vin := dyn.NewValue(false, dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue(false, dyn.Location{File: "file", Line: 1, Column: 1}), vout) +} + +func TestNormalizeIntToAny(t *testing.T) { + var typ any + vin := dyn.NewValue(10, dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue(10, dyn.Location{File: "file", Line: 1, Column: 1}), vout) +} + +func TestNormalizeSliceToAny(t *testing.T) { + var typ any + v1 := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + v2 := dyn.NewValue(2, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue([]dyn.Value{v1, v2}, dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue([]dyn.Value{v1, v2}, dyn.Location{File: "file", Line: 1, Column: 1}), vout) +} diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index f10853a2e..91d6445a1 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -46,6 +46,8 @@ func ToTyped(dst any, src dyn.Value) error { return toTypedInt(dstv, src) case reflect.Float32, reflect.Float64: return toTypedFloat(dstv, src) + case reflect.Interface: + return toTypedInterface(dstv, src) } return fmt.Errorf("unsupported type: %s", dstv.Kind()) @@ -101,6 +103,12 @@ func toTypedStruct(dst reflect.Value, src dyn.Value) error { case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -132,6 +140,12 @@ func toTypedMap(dst reflect.Value, src dyn.Value) error { case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -157,6 +171,12 @@ func toTypedSlice(dst reflect.Value, src dyn.Value) error { case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). 
+ if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -260,3 +280,8 @@ func toTypedFloat(dst reflect.Value, src dyn.Value) error { msg: fmt.Sprintf("expected a float, found a %s", src.Kind()), } } + +func toTypedInterface(dst reflect.Value, src dyn.Value) error { + dst.Set(reflect.ValueOf(src.AsAny())) + return nil +} diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 56d98a3cf..5e37f2863 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -511,3 +511,25 @@ func TestToTypedWithAliasKeyType(t *testing.T) { assert.Equal(t, "bar", out["foo"]) assert.Equal(t, "baz", out["bar"]) } + +func TestToTypedAnyWithBool(t *testing.T) { + var out any + err := ToTyped(&out, dyn.V(false)) + require.NoError(t, err) + assert.Equal(t, false, out) + + err = ToTyped(&out, dyn.V(true)) + require.NoError(t, err) + assert.Equal(t, true, out) +} + +func TestToTypedAnyWithMap(t *testing.T) { + var out any + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), + }) + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, map[string]any{"foo": "bar", "bar": "baz"}, out) +} diff --git a/libs/dyn/dynvar/ref.go b/libs/dyn/dynvar/ref.go index e6340269f..bf160fa85 100644 --- a/libs/dyn/dynvar/ref.go +++ b/libs/dyn/dynvar/ref.go @@ -6,7 +6,7 @@ import ( "github.com/databricks/cli/libs/dyn" ) -const VariableRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}` +const VariableRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)*(\[[0-9]+\])*)\}` var re = regexp.MustCompile(VariableRegex) diff --git a/libs/dyn/dynvar/resolve_test.go b/libs/dyn/dynvar/resolve_test.go index bbecbb776..498322a42 100644 --- a/libs/dyn/dynvar/resolve_test.go +++ b/libs/dyn/dynvar/resolve_test.go @@ -247,3 +247,63 @@ func TestResolveWithInterpolateAliasedRef(t *testing.T) { assert.Equal(t, "a", getByPath(t, out, "b").MustString()) assert.Equal(t, "a", getByPath(t, out, "c").MustString()) } + +func TestResolveIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{dyn.V("a"), dyn.V("b")}), + "a": dyn.V("a: ${slice[0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveIndexedRefsFromMap(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "map": dyn.V( + map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{dyn.V("a")}), + }), + "a": dyn.V("a: ${map.slice[0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveMapFieldFromIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "map": dyn.V( + map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "value": dyn.V("a"), + }), + }), + }), + "a": dyn.V("a: ${map.slice[0].value}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveNestedIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{ + dyn.V([]dyn.Value{dyn.V("a")}), + }), + "a": dyn.V("a: ${slice[0][0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: 
a", getByPath(t, out, "a").MustString()) +} From cdd6fe8cb96d76035337bfda0bb798b090095dfb Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 26 Jun 2024 14:24:31 +0200 Subject: [PATCH 249/286] Release v0.222.0 (#1529) CLI: * Add link to documentation for Homebrew installation to README ([#1505](https://github.com/databricks/cli/pull/1505)). * Fix `databricks configure` to use `DATABRICKS_CONFIG_FILE` environment variable if exists as config file ([#1325](https://github.com/databricks/cli/pull/1325)). Bundles: The Terraform upgrade to v1.48.0 includes a fix for library order not being respected. * Fix conditional in query in `default-sql` template ([#1479](https://github.com/databricks/cli/pull/1479)). * Remove user credentials specified in the Git origin URL ([#1494](https://github.com/databricks/cli/pull/1494)). * Serialize dynamic value for `bundle validate` output ([#1499](https://github.com/databricks/cli/pull/1499)). * Override variables with lookup value even if values has default value set ([#1504](https://github.com/databricks/cli/pull/1504)). * Pause quality monitors when "mode: development" is used ([#1481](https://github.com/databricks/cli/pull/1481)). * Return `fs.ModeDir` for Git folders in the workspace ([#1521](https://github.com/databricks/cli/pull/1521)). * Upgrade TF provider to 1.48.0 ([#1527](https://github.com/databricks/cli/pull/1527)). * Added support for complex variables ([#1467](https://github.com/databricks/cli/pull/1467)). Internal: * Add randIntn function ([#1475](https://github.com/databricks/cli/pull/1475)). * Avoid multiple file tree traversals on bundle deploy ([#1493](https://github.com/databricks/cli/pull/1493)). * Clean up unused code ([#1502](https://github.com/databricks/cli/pull/1502)). * Use `dyn.InvalidValue` to indicate absence ([#1507](https://github.com/databricks/cli/pull/1507)). * Add ApplyPythonMutator ([#1430](https://github.com/databricks/cli/pull/1430)). * Set bool pointer to disable lock ([#1516](https://github.com/databricks/cli/pull/1516)). * Allow the any type to be set to nil in `convert.FromTyped` ([#1518](https://github.com/databricks/cli/pull/1518)). * Properly deal with nil values in `convert.FromTyped` ([#1511](https://github.com/databricks/cli/pull/1511)). * Return `dyn.InvalidValue` instead of `dyn.NilValue` when errors happen ([#1514](https://github.com/databricks/cli/pull/1514)). * PythonMutator: replace stdin/stdout with files ([#1512](https://github.com/databricks/cli/pull/1512)). * Add context type and value to path rewriting ([#1525](https://github.com/databricks/cli/pull/1525)). API Changes: * Added schedule CRUD commands to `databricks lakeview`. * Added subscription CRUD commands to `databricks lakeview`. * Added `databricks apps start` command. OpenAPI commit 7437dabb9dadee402c1fc060df4c1ce8cc5369f0 (2024-06-24) Dependency updates: * Bump golang.org/x/text from 0.15.0 to 0.16.0 ([#1482](https://github.com/databricks/cli/pull/1482)). * Bump golang.org/x/term from 0.20.0 to 0.21.0 ([#1483](https://github.com/databricks/cli/pull/1483)). * Bump golang.org/x/mod from 0.17.0 to 0.18.0 ([#1484](https://github.com/databricks/cli/pull/1484)). * Bump golang.org/x/oauth2 from 0.20.0 to 0.21.0 ([#1485](https://github.com/databricks/cli/pull/1485)). * Bump github.com/briandowns/spinner from 1.23.0 to 1.23.1 ([#1495](https://github.com/databricks/cli/pull/1495)). * Bump github.com/spf13/cobra from 1.8.0 to 1.8.1 ([#1496](https://github.com/databricks/cli/pull/1496)). 
* Bump github.com/databricks/databricks-sdk-go from 0.42.0 to 0.43.0 ([#1522](https://github.com/databricks/cli/pull/1522)). --- CHANGELOG.md | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f6f47dc6..c5fcc45b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,53 @@ # Version changelog +## 0.222.0 + +CLI: + * Add link to documentation for Homebrew installation to README ([#1505](https://github.com/databricks/cli/pull/1505)). + * Fix `databricks configure` to use `DATABRICKS_CONFIG_FILE` environment variable if exists as config file ([#1325](https://github.com/databricks/cli/pull/1325)). + +Bundles: + +The Terraform upgrade to v1.48.0 includes a fix for library order not being respected. + + * Fix conditional in query in `default-sql` template ([#1479](https://github.com/databricks/cli/pull/1479)). + * Remove user credentials specified in the Git origin URL ([#1494](https://github.com/databricks/cli/pull/1494)). + * Serialize dynamic value for `bundle validate` output ([#1499](https://github.com/databricks/cli/pull/1499)). + * Override variables with lookup value even if values has default value set ([#1504](https://github.com/databricks/cli/pull/1504)). + * Pause quality monitors when "mode: development" is used ([#1481](https://github.com/databricks/cli/pull/1481)). + * Return `fs.ModeDir` for Git folders in the workspace ([#1521](https://github.com/databricks/cli/pull/1521)). + * Upgrade TF provider to 1.48.0 ([#1527](https://github.com/databricks/cli/pull/1527)). + * Added support for complex variables ([#1467](https://github.com/databricks/cli/pull/1467)). + +Internal: + * Add randIntn function ([#1475](https://github.com/databricks/cli/pull/1475)). + * Avoid multiple file tree traversals on bundle deploy ([#1493](https://github.com/databricks/cli/pull/1493)). + * Clean up unused code ([#1502](https://github.com/databricks/cli/pull/1502)). + * Use `dyn.InvalidValue` to indicate absence ([#1507](https://github.com/databricks/cli/pull/1507)). + * Add ApplyPythonMutator ([#1430](https://github.com/databricks/cli/pull/1430)). + * Set bool pointer to disable lock ([#1516](https://github.com/databricks/cli/pull/1516)). + * Allow the any type to be set to nil in `convert.FromTyped` ([#1518](https://github.com/databricks/cli/pull/1518)). + * Properly deal with nil values in `convert.FromTyped` ([#1511](https://github.com/databricks/cli/pull/1511)). + * Return `dyn.InvalidValue` instead of `dyn.NilValue` when errors happen ([#1514](https://github.com/databricks/cli/pull/1514)). + * PythonMutator: replace stdin/stdout with files ([#1512](https://github.com/databricks/cli/pull/1512)). + * Add context type and value to path rewriting ([#1525](https://github.com/databricks/cli/pull/1525)). + +API Changes: + * Added schedule CRUD commands to `databricks lakeview`. + * Added subscription CRUD commands to `databricks lakeview`. + * Added `databricks apps start` command. + +OpenAPI commit 7437dabb9dadee402c1fc060df4c1ce8cc5369f0 (2024-06-24) + +Dependency updates: + * Bump golang.org/x/text from 0.15.0 to 0.16.0 ([#1482](https://github.com/databricks/cli/pull/1482)). + * Bump golang.org/x/term from 0.20.0 to 0.21.0 ([#1483](https://github.com/databricks/cli/pull/1483)). + * Bump golang.org/x/mod from 0.17.0 to 0.18.0 ([#1484](https://github.com/databricks/cli/pull/1484)). + * Bump golang.org/x/oauth2 from 0.20.0 to 0.21.0 ([#1485](https://github.com/databricks/cli/pull/1485)). 
+ * Bump github.com/briandowns/spinner from 1.23.0 to 1.23.1 ([#1495](https://github.com/databricks/cli/pull/1495)). + * Bump github.com/spf13/cobra from 1.8.0 to 1.8.1 ([#1496](https://github.com/databricks/cli/pull/1496)). + * Bump github.com/databricks/databricks-sdk-go from 0.42.0 to 0.43.0 ([#1522](https://github.com/databricks/cli/pull/1522)). + ## 0.221.1 Bundles: From dba6164a4ce5dd45b45f174f804f982db6ca94f0 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Thu, 27 Jun 2024 11:47:58 +0200 Subject: [PATCH 250/286] merge.Override: Fix handling of dyn.NilValue (#1530) ## Changes Fix handling of `dyn.NilValue` in `merge.Override` in case `dyn.Value` has location ## Tests Unit tests --- libs/dyn/merge/override.go | 8 +++----- libs/dyn/merge/override_test.go | 6 +++--- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go index 97e8f1009..81bbaa4d5 100644 --- a/libs/dyn/merge/override.go +++ b/libs/dyn/merge/override.go @@ -30,10 +30,6 @@ func Override(leftRoot dyn.Value, rightRoot dyn.Value, visitor OverrideVisitor) } func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { - if left == dyn.NilValue && right == dyn.NilValue { - return dyn.NilValue, nil - } - if left.Kind() != right.Kind() { return visitor.VisitUpdate(basePath, left, right) } @@ -98,9 +94,11 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri } else { return visitor.VisitUpdate(basePath, left, right) } + case dyn.KindNil: + return left, nil } - return dyn.InvalidValue, fmt.Errorf("unexpected kind %s", left.Kind()) + return dyn.InvalidValue, fmt.Errorf("unexpected kind %s at %s", left.Kind(), basePath.String()) } func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index a34f23424..d8fd4e178 100644 --- a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -330,9 +330,9 @@ func TestOverride_Primitive(t *testing.T) { { name: "nil (not updated)", state: visitorState{}, - left: dyn.NilValue, - right: dyn.NilValue, - expected: dyn.NilValue, + left: dyn.NilValue.WithLocation(leftLocation), + right: dyn.NilValue.WithLocation(rightLocation), + expected: dyn.NilValue.WithLocation(leftLocation), }, { name: "nil (updated)", From 4d8eba04cd008120ea544afcd03c54c05cf2e5dd Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 27 Jun 2024 18:58:19 +0530 Subject: [PATCH 251/286] Compare `.Kind()` instead of direct equality checks on a `dyn.Value` (#1520) ## Changes This PR makes two changes: 1. In https://github.com/databricks/cli/pull/1510 we'll be adding multiple associated location metadata with a dyn.Value. The Go compiler does not allow comparing structs if they contain slice values (presumably due to multiple possible definitions for equality). In anticipation for adding a `[]dyn.Location` type field to `dyn.Value` this PR removes all direct comparisons of `dyn.Value` and instead relies on the kind. 2. Retain location metadata for values in convert.FromTyped. The change diff is exactly the same as https://github.com/databricks/cli/pull/1523. It's been combined with this PR because they both depend on each other to prevent test failures (forming a test failure deadlock). 
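As an aside (not part of the change itself), the motivation in point 1 can be illustrated with a small, self-contained Go sketch. The types below are simplified stand-ins rather than the real `dyn.Value` and `dyn.Location` definitions: once a struct gains a slice-typed field, the `==` operator on it no longer compiles, while a `Kind()`-style comparison keeps working.

```
// Illustration only: simplified stand-ins for dyn.Value and dyn.Location.
package main

import "fmt"

type Kind int

const (
	KindInvalid Kind = iota
	KindNil
	KindString
)

type Location struct{ File string }

type Value struct {
	kind Kind
	// Adding a slice field makes the struct non-comparable:
	// `v == other` becomes a compile-time error
	// (roughly: "struct containing []Location cannot be compared").
	locations []Location
}

func (v Value) Kind() Kind { return v.kind }

func main() {
	v := Value{kind: KindNil, locations: []Location{{File: "databricks.yml"}}}

	// Comparing kinds works regardless of any non-comparable fields.
	if v.Kind() == KindNil {
		fmt.Println("value is nil")
	}
	// By contrast, `v == Value{}` would no longer compile once the slice field exists.
}
```
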
Go patch used: ``` @@ var x expression @@ -x == dyn.InvalidValue +x.Kind() == dyn.KindInvalid @@ var x expression @@ -x != dyn.InvalidValue +x.Kind() != dyn.KindInvalid @@ var x expression @@ -x == dyn.NilValue +x.Kind() == dyn.KindNil @@ var x expression @@ -x != dyn.NilValue +x.Kind() != dyn.KindNil ``` ## Tests Unit tests and integration tests pass. --- bundle/config/mutator/environments_compat.go | 4 +- bundle/config/mutator/merge_job_clusters.go | 2 +- bundle/config/mutator/merge_job_tasks.go | 2 +- .../config/mutator/merge_pipeline_clusters.go | 2 +- bundle/config/mutator/run_as.go | 19 +-- bundle/config/root.go | 18 +-- .../{ => empty_run_as}/databricks.yml | 0 .../empty_sp/databricks.yml | 5 + .../empty_user/databricks.yml | 5 + .../empty_user_and_sp/databricks.yml | 6 + .../override}/databricks.yml | 0 .../override}/override.yml | 0 bundle/tests/run_as_test.go | 58 +++++++--- libs/dyn/convert/from_typed.go | 38 +++--- libs/dyn/convert/from_typed_test.go | 109 ++++++++++++++---- libs/dyn/convert/to_typed.go | 2 +- 16 files changed, 192 insertions(+), 78 deletions(-) rename bundle/tests/run_as/not_allowed/neither_sp_nor_user/{ => empty_run_as}/databricks.yml (100%) create mode 100644 bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml create mode 100644 bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml create mode 100644 bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml rename bundle/tests/run_as/not_allowed/{neither_sp_nor_user_override => neither_sp_nor_user/override}/databricks.yml (100%) rename bundle/tests/run_as/not_allowed/{neither_sp_nor_user_override => neither_sp_nor_user/override}/override.yml (100%) diff --git a/bundle/config/mutator/environments_compat.go b/bundle/config/mutator/environments_compat.go index 053fd2e36..fb898edea 100644 --- a/bundle/config/mutator/environments_compat.go +++ b/bundle/config/mutator/environments_compat.go @@ -32,7 +32,7 @@ func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) dia targets := v.Get("targets") // Return an error if both "environments" and "targets" are set. - if environments != dyn.InvalidValue && targets != dyn.InvalidValue { + if environments.Kind() != dyn.KindInvalid && targets.Kind() != dyn.KindInvalid { return dyn.InvalidValue, fmt.Errorf( "both 'environments' and 'targets' are specified; only 'targets' should be used: %s", environments.Location().String(), @@ -40,7 +40,7 @@ func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) dia } // Rewrite "environments" to "targets". 
- if environments != dyn.InvalidValue && targets == dyn.InvalidValue { + if environments.Kind() != dyn.KindInvalid && targets.Kind() == dyn.KindInvalid { nv, err := dyn.Set(v, "targets", environments) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/config/mutator/merge_job_clusters.go b/bundle/config/mutator/merge_job_clusters.go index ec6154608..aa131f287 100644 --- a/bundle/config/mutator/merge_job_clusters.go +++ b/bundle/config/mutator/merge_job_clusters.go @@ -32,7 +32,7 @@ func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string { func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - if v == dyn.NilValue { + if v.Kind() == dyn.KindNil { return v, nil } diff --git a/bundle/config/mutator/merge_job_tasks.go b/bundle/config/mutator/merge_job_tasks.go index f9a9bf718..9498e8822 100644 --- a/bundle/config/mutator/merge_job_tasks.go +++ b/bundle/config/mutator/merge_job_tasks.go @@ -32,7 +32,7 @@ func (m *mergeJobTasks) taskKeyString(v dyn.Value) string { func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - if v == dyn.NilValue { + if v.Kind() == dyn.KindNil { return v, nil } diff --git a/bundle/config/mutator/merge_pipeline_clusters.go b/bundle/config/mutator/merge_pipeline_clusters.go index c75f65326..52f3e6fa6 100644 --- a/bundle/config/mutator/merge_pipeline_clusters.go +++ b/bundle/config/mutator/merge_pipeline_clusters.go @@ -35,7 +35,7 @@ func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string { func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - if v == dyn.NilValue { + if v.Kind() == dyn.KindNil { return v, nil } diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index aecd1d17e..d344a988a 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -53,14 +53,20 @@ func (e errBothSpAndUserSpecified) Error() string { } func validateRunAs(b *bundle.Bundle) error { - runAs := b.Config.RunAs - - // Error if neither service_principal_name nor user_name are specified - if runAs.ServicePrincipalName == "" && runAs.UserName == "" { - return fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as")) + neitherSpecifiedErr := fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as")) + // Error if neither service_principal_name nor user_name are specified, but the + // run_as section is present. + if b.Config.Value().Get("run_as").Kind() == dyn.KindNil { + return neitherSpecifiedErr + } + // Error if one or both of service_principal_name and user_name are specified, + // but with empty values. 
+ if b.Config.RunAs.ServicePrincipalName == "" && b.Config.RunAs.UserName == "" { + return neitherSpecifiedErr } // Error if both service_principal_name and user_name are specified + runAs := b.Config.RunAs if runAs.UserName != "" && runAs.ServicePrincipalName != "" { return errBothSpAndUserSpecified{ spName: runAs.ServicePrincipalName, @@ -163,8 +169,7 @@ func setPipelineOwnersToRunAsIdentity(b *bundle.Bundle) { func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { // Mutator is a no-op if run_as is not specified in the bundle - runAs := b.Config.RunAs - if runAs == nil { + if b.Config.Value().Get("run_as").Kind() == dyn.KindInvalid { return nil } diff --git a/bundle/config/root.go b/bundle/config/root.go index 0def1167b..60faba29c 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -346,7 +346,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `run_as`. This field must be overwritten if set, not merged. - if v := target.Get("run_as"); v != dyn.InvalidValue { + if v := target.Get("run_as"); v.Kind() != dyn.KindInvalid { root, err = dyn.Set(root, "run_as", v) if err != nil { return err @@ -354,7 +354,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Below, we're setting fields on the bundle key, so make sure it exists. - if root.Get("bundle") == dyn.InvalidValue { + if root.Get("bundle").Kind() == dyn.KindInvalid { root, err = dyn.Set(root, "bundle", dyn.NewValue(map[string]dyn.Value{}, dyn.Location{})) if err != nil { return err @@ -362,7 +362,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `mode`. This field must be overwritten if set, not merged. - if v := target.Get("mode"); v != dyn.InvalidValue { + if v := target.Get("mode"); v.Kind() != dyn.KindInvalid { root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("mode")), v) if err != nil { return err @@ -370,7 +370,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `compute_id`. This field must be overwritten if set, not merged. - if v := target.Get("compute_id"); v != dyn.InvalidValue { + if v := target.Get("compute_id"); v.Kind() != dyn.KindInvalid { root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v) if err != nil { return err @@ -378,7 +378,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `git`. - if v := target.Get("git"); v != dyn.InvalidValue { + if v := target.Get("git"); v.Kind() != dyn.KindInvalid { ref, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git"))) if err != nil { ref = dyn.NewValue(map[string]dyn.Value{}, dyn.Location{}) @@ -391,7 +391,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // If the branch was overridden, we need to clear the inferred flag. - if branch := v.Get("branch"); branch != dyn.InvalidValue { + if branch := v.Get("branch"); branch.Kind() != dyn.KindInvalid { out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.NewValue(false, dyn.Location{})) if err != nil { return err @@ -419,7 +419,7 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { // For each target, rewrite the variables block. return dyn.Map(v, "targets", dyn.Foreach(func(_ dyn.Path, target dyn.Value) (dyn.Value, error) { // Confirm it has a variables block. 
- if target.Get("variables") == dyn.InvalidValue { + if target.Get("variables").Kind() == dyn.KindInvalid { return target, nil } @@ -464,7 +464,7 @@ func validateVariableOverrides(root, target dyn.Value) (err error) { var tv map[string]variable.Variable // Collect variables from the root. - if v := root.Get("variables"); v != dyn.InvalidValue { + if v := root.Get("variables"); v.Kind() != dyn.KindInvalid { err = convert.ToTyped(&rv, v) if err != nil { return fmt.Errorf("unable to collect variables from root: %w", err) @@ -472,7 +472,7 @@ func validateVariableOverrides(root, target dyn.Value) (err error) { } // Collect variables from the target. - if v := target.Get("variables"); v != dyn.InvalidValue { + if v := target.Get("variables"); v.Kind() != dyn.KindInvalid { err = convert.ToTyped(&tv, v) if err != nil { return fmt.Errorf("unable to collect variables from target: %w", err) diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml similarity index 100% rename from bundle/tests/run_as/not_allowed/neither_sp_nor_user/databricks.yml rename to bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml new file mode 100644 index 000000000..be18f60e8 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml @@ -0,0 +1,5 @@ +bundle: + name: "abc" + +run_as: + service_principal_name: "" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml new file mode 100644 index 000000000..33c48cb58 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml @@ -0,0 +1,5 @@ +bundle: + name: "abc" + +run_as: + user_name: "" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml new file mode 100644 index 000000000..4b59dc918 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml @@ -0,0 +1,6 @@ +bundle: + name: "abc" + +run_as: + service_principal_name: "" + user_name: "" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/databricks.yml similarity index 100% rename from bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/databricks.yml rename to bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/databricks.yml diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/override.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/override.yml similarity index 100% rename from bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/override.yml rename to bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/override.yml diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 5ad7a89aa..6c07cc537 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -196,27 +196,53 @@ func TestRunAsErrorWhenBothUserAndSpSpecified(t *testing.T) { } func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { - b := load(t, "./run_as/not_allowed/neither_sp_nor_user") + 
tcases := []struct { + name string + err string + }{ + { + name: "empty_run_as", + err: fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:8", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml")), + }, + { + name: "empty_sp", + err: fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml")), + }, + { + name: "empty_user", + err: fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml")), + }, + { + name: "empty_user_and_sp", + err: fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml")), + }, + } - ctx := context.Background() - bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - b.Config.Workspace.CurrentUser = &config.User{ - User: &iam.User{ - UserName: "my_service_principal", - }, - } - return nil - }) + for _, tc := range tcases { + t.Run(tc.name, func(t *testing.T) { - diags := bundle.Apply(ctx, b, mutator.SetRunAs()) - err := diags.Error() + bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name) + b := load(t, bundlePath) - configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/databricks.yml") - assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:8", configPath)) + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + assert.EqualError(t, err, tc.err) + }) + } } func TestRunAsErrorNeitherUserOrSpSpecifiedAtTargetOverride(t *testing.T) { - b := loadTarget(t, "./run_as/not_allowed/neither_sp_nor_user_override", "development") + b := loadTarget(t, "./run_as/not_allowed/neither_sp_nor_user/override", "development") ctx := context.Background() bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { @@ -231,7 +257,7 @@ func TestRunAsErrorNeitherUserOrSpSpecifiedAtTargetOverride(t *testing.T) { diags := bundle.Apply(ctx, b, mutator.SetRunAs()) err := diags.Error() - configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user_override/override.yml") + configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/override/override.yml") assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. 
Neither service_principal_name nor user_name is specified at %s:4:12", configPath)) } diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index 15c5b7978..e8d321f66 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -42,7 +42,7 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, // Dereference pointer if necessary for srcv.Kind() == reflect.Pointer { if srcv.IsNil() { - return dyn.NilValue, nil + return dyn.NilValue.WithLocation(ref.Location()), nil } srcv = srcv.Elem() @@ -55,27 +55,35 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, } } + var v dyn.Value + var err error switch srcv.Kind() { case reflect.Struct: - return fromTypedStruct(srcv, ref, options...) + v, err = fromTypedStruct(srcv, ref, options...) case reflect.Map: - return fromTypedMap(srcv, ref) + v, err = fromTypedMap(srcv, ref) case reflect.Slice: - return fromTypedSlice(srcv, ref) + v, err = fromTypedSlice(srcv, ref) case reflect.String: - return fromTypedString(srcv, ref, options...) + v, err = fromTypedString(srcv, ref, options...) case reflect.Bool: - return fromTypedBool(srcv, ref, options...) + v, err = fromTypedBool(srcv, ref, options...) case reflect.Int, reflect.Int32, reflect.Int64: - return fromTypedInt(srcv, ref, options...) + v, err = fromTypedInt(srcv, ref, options...) case reflect.Float32, reflect.Float64: - return fromTypedFloat(srcv, ref, options...) + v, err = fromTypedFloat(srcv, ref, options...) case reflect.Invalid: // If the value is untyped and not set (e.g. any type with nil value), we return nil. - return dyn.NilValue, nil + v, err = dyn.NilValue, nil + default: + return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) } - return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) + // Ensure the location metadata is retained. + if err != nil { + return dyn.InvalidValue, err + } + return v.WithLocation(ref.Location()), err } func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -117,7 +125,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio } // Either if the key was set in the reference or the field is not zero-valued, we include it. - if ok || nv != dyn.NilValue { + if ok || nv.Kind() != dyn.KindNil { out.Set(refk, nv) } } @@ -127,7 +135,7 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptio // 2. The reference is a map (i.e. the struct was and still is empty). // 3. The "includeZeroValues" option is set (i.e. the struct is a non-nil pointer). if out.Len() > 0 || ref.Kind() == dyn.KindMap || slices.Contains(options, includeZeroValues) { - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } // Otherwise, return nil. @@ -179,7 +187,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out.Set(refk, nv) } - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { @@ -206,7 +214,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { refv := ref.Index(i) // Use nil reference if there is no reference for this index. 
- if refv == dyn.InvalidValue { + if refv.Kind() == dyn.KindInvalid { refv = dyn.NilValue } @@ -219,7 +227,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out[i] = nv } - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index ed0c11ca4..9141a6948 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -49,7 +49,7 @@ func TestFromTypedStructPointerZeroFields(t *testing.T) { require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) - // For an initialized pointer with a nil reference we expect a nil. + // For an initialized pointer with a nil reference we expect an empty map. src = &Tmp{} nv, err = FromTyped(src, dyn.NilValue) require.NoError(t, err) @@ -103,7 +103,7 @@ func TestFromTypedStructSetFields(t *testing.T) { }), nv) } -func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedStructSetFieldsRetainLocation(t *testing.T) { type Tmp struct { Foo string `json:"foo"` Bar string `json:"bar"` @@ -122,11 +122,9 @@ func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their location. assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) } func TestFromTypedStringMapWithZeroValue(t *testing.T) { @@ -354,7 +352,7 @@ func TestFromTypedMapNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { var src = map[string]string{ "foo": "bar", "bar": "qux", @@ -368,11 +366,9 @@ func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their locations. assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { @@ -429,7 +425,7 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { var src = []string{ "foo", "bar", @@ -437,17 +433,15 @@ func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { ref := dyn.V([]dyn.Value{ dyn.NewValue("foo", dyn.Location{File: "foo"}), - dyn.NewValue("baz", dyn.Location{File: "baz"}), + dyn.NewValue("bar", dyn.Location{File: "bar"}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their locations. assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv.Index(0)) - - // Assert bar lost its location (because it was overwritten). 
- assert.Equal(t, dyn.NewValue("bar", dyn.Location{}), nv.Index(1)) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "bar"}), nv.Index(1)) } func TestFromTypedStringEmpty(t *testing.T) { @@ -482,12 +476,20 @@ func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V("new"), nv) } -func TestFromTypedStringRetainsLocationsIfUnchanged(t *testing.T) { - var src string = "foo" +func TestFromTypedStringRetainsLocations(t *testing.T) { var ref = dyn.NewValue("foo", dyn.Location{File: "foo"}) + + // case: value has not been changed + var src string = "foo" nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = "bar" + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv) } func TestFromTypedStringTypeError(t *testing.T) { @@ -529,12 +531,20 @@ func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(true), nv) } -func TestFromTypedBoolRetainsLocationsIfUnchanged(t *testing.T) { - var src bool = true +func TestFromTypedBoolRetainsLocations(t *testing.T) { var ref = dyn.NewValue(true, dyn.Location{File: "foo"}) + + // case: value has not been changed + var src bool = true nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(true, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = false + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(false, dyn.Location{File: "foo"}), nv) } func TestFromTypedBoolVariableReference(t *testing.T) { @@ -584,12 +594,20 @@ func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(int64(1234)), nv) } -func TestFromTypedIntRetainsLocationsIfUnchanged(t *testing.T) { - var src int = 1234 +func TestFromTypedIntRetainsLocations(t *testing.T) { var ref = dyn.NewValue(1234, dyn.Location{File: "foo"}) + + // case: value has not been changed + var src int = 1234 nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(1234, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = 1235 + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(int64(1235), dyn.Location{File: "foo"}), nv) } func TestFromTypedIntVariableReference(t *testing.T) { @@ -639,12 +657,21 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(1.23), nv) } -func TestFromTypedFloatRetainsLocationsIfUnchanged(t *testing.T) { - var src float64 = 1.23 +func TestFromTypedFloatRetainsLocations(t *testing.T) { + var src float64 var ref = dyn.NewValue(1.23, dyn.Location{File: "foo"}) + + // case: value has not been changed + src = 1.23 nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(1.23, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = 1.24 + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(1.24, dyn.Location{File: "foo"}), nv) } func TestFromTypedFloatVariableReference(t *testing.T) { @@ -705,3 +732,35 @@ func TestFromTypedAnyNil(t *testing.T) { require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) } + +func TestFromTypedNilPointerRetainsLocations(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + var src *Tmp + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, 
err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} + +func TestFromTypedNilMapRetainsLocation(t *testing.T) { + var src map[string]string + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} + +func TestFromTypedNilSliceRetainsLocation(t *testing.T) { + var src []string + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index 91d6445a1..8febe87ae 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -16,7 +16,7 @@ func ToTyped(dst any, src dyn.Value) error { for dstv.Kind() == reflect.Pointer { // If the source value is nil and the destination is a settable pointer, // set the destination to nil. Also see `end_to_end_test.go`. - if dstv.CanSet() && src == dyn.NilValue { + if dstv.CanSet() && src.Kind() == dyn.KindNil { dstv.SetZero() return nil } From aee3910f3d5b7e982d59c9efbd3ad390240758e4 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Mon, 1 Jul 2024 09:46:37 +0200 Subject: [PATCH 252/286] PythonMutator: register product in user agent extra (#1533) ## Changes Register user agent product following RFC 9110. See https://github.com/databricks/terraform-provider-databricks/pull/3520 for Terraform change. ## Tests Unit tests --- bundle/deploy/terraform/init.go | 22 ++++++++++++++++++++++ bundle/deploy/terraform/init_test.go | 21 +++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index d1847cf24..d480242ce 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -218,6 +218,23 @@ func setProxyEnvVars(ctx context.Context, environ map[string]string, b *bundle.B return nil } +func setUserAgentExtraEnvVar(environ map[string]string, b *bundle.Bundle) error { + var products []string + + if experimental := b.Config.Experimental; experimental != nil { + if experimental.PyDABs.Enabled { + products = append(products, "databricks-pydabs/0.0.0") + } + } + + userAgentExtra := strings.Join(products, " ") + if userAgentExtra != "" { + environ["DATABRICKS_USER_AGENT_EXTRA"] = userAgentExtra + } + + return nil +} + func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tfConfig := b.Config.Bundle.Terraform if tfConfig == nil { @@ -262,6 +279,11 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnosti return diag.FromErr(err) } + err = setUserAgentExtraEnvVar(environ, b) + if err != nil { + return diag.FromErr(err) + } + // Configure environment variables for auth for Terraform to use. 
log.Debugf(ctx, "Environment variables for Terraform: %s", strings.Join(maps.Keys(environ), ", ")) err = tf.SetEnv(environ) diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 421e9be3f..aa9b2f77f 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -248,6 +248,27 @@ func TestSetProxyEnvVars(t *testing.T) { assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env)) } +func TestSetUserAgentExtraEnvVar(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Experimental: &config.Experimental{ + PyDABs: config.PyDABs{ + Enabled: true, + }, + }, + }, + } + + env := make(map[string]string, 0) + err := setUserAgentExtraEnvVar(env, b) + + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "DATABRICKS_USER_AGENT_EXTRA": "databricks-pydabs/0.0.0", + }, env) +} + func TestInheritEnvVars(t *testing.T) { env := map[string]string{} From c7a36921b46884cef432143e2d78a566e738f5e0 Mon Sep 17 00:00:00 2001 From: kijewskimateusz <65126185+kijewskimateusz@users.noreply.github.com> Date: Mon, 1 Jul 2024 09:52:22 +0200 Subject: [PATCH 253/286] Fix non-default project names not working in dbt-sql template (#1500) ## Changes Hello Team, While tinkering with your solution, I've noticed that profiles provided in dbt_project.yml and profiles.yml for generated dbt asset bundles. do not align. This led to the following error, when deploying DAB: ``` + dbt deps --target=dev 11:24:02 Running with dbt=1.8.2 11:24:02 Warning: No packages were found in packages.yml 11:24:02 Warning: No packages were found in packages.yml + dbt seed --target=dev --vars '{ dev_schema: mateusz_kijewski }' 11:24:05 Running with dbt=1.8.2 11:24:05 Encountered an error: Runtime Error Could not find profile named 'dbt_sql' ``` I have corrected profile name in profiles.yml.tmpl to the name used in dbt_project.yml.tmpl. Using the opportunity of forking your repo, I've also updated tests configuration in model config as starting of dbt v1.8 it's been raising warnings of configuration change from tests to data_tests ``` 11:31:34 [WARNING]: Deprecated functionality The `tests` config has been renamed to `data_tests`. Please see https://docs.getdbt.com/docs/build/data-tests#new-data_tests-syntax for more information. ``` ## Tests --- .../template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl | 2 +- .../dbt-sql/template/{{.project_name}}/requirements-dev.txt | 2 +- .../resources/{{.project_name}}_job.yml.tmpl | 2 +- .../template/{{.project_name}}/src/models/example/schema.yml | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl index cce80f8d4..e96931e2d 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl @@ -3,7 +3,7 @@ {{- $catalog = "\"\" # workspace default"}} {{- end}} # This file defines dbt profiles for deployed dbt jobs. 
-my_dbt_project: +{{.project_name}}: target: dev # default target outputs: diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt b/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt index 10d7b9f10..e6b861203 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt @@ -1,3 +1,3 @@ ## requirements-dev.txt: dependencies for local development. -dbt-databricks>=1.0.0,<2.0.0 +dbt-databricks>=1.8.0,<2.0.0 diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl index acf1aa480..bad12c755 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -35,7 +35,7 @@ resources: libraries: - pypi: - package: dbt-databricks>=1.0.0,<2.0.0 + package: dbt-databricks>=1.8.0,<2.0.0 new_cluster: spark_version: {{template "latest_lts_dbr_version"}} diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml index d34b9e645..c64f1bfce 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml @@ -7,7 +7,7 @@ models: columns: - name: customer_name description: "The name of a customer" - tests: + data_tests: - unique - not_null @@ -16,6 +16,6 @@ models: columns: - name: order_date description: "The date on which orders took place" - tests: + data_tests: - unique - not_null From e8b76a7f13d902b7fdf96f5a9faff98f4b2fa4e9 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Mon, 1 Jul 2024 11:01:10 +0200 Subject: [PATCH 254/286] Improve `bundle validate` output (#1532) ## Changes This combination of changes allows pretty-printing errors happening during the "load" and "init" phases, including their locations. Move the render code into a separate module dedicated to rendering `diag.Diagnostics` in a human-readable format. This will be used for the `bundle deploy` command. Preserve the "bundle" value if an error occurs in mutators. Rewrite the Go templates to handle the case where the bundle isn't loaded yet because an error occurred during loading, which is now possible. Improve rendering for errors and warnings: - don't render empty locations - render "details" for errors if they exist Add `root.ErrAlreadyPrinted` to indicate that the error was already printed and that the CLI entry point shouldn't print it again.
## Tests Add tests for output, that are especially handy to detect extra newlines --- bundle/render/render_text_output.go | 176 ++++++++++++++++ bundle/render/render_text_output_test.go | 258 +++++++++++++++++++++++ bundle/tests/suggest_target_test.go | 15 +- cmd/bundle/utils/utils.go | 7 +- cmd/bundle/validate.go | 135 +++--------- cmd/root/bundle.go | 6 +- cmd/root/root.go | 3 +- cmd/root/silent_err.go | 7 + 8 files changed, 480 insertions(+), 127 deletions(-) create mode 100644 bundle/render/render_text_output.go create mode 100644 bundle/render/render_text_output_test.go create mode 100644 cmd/root/silent_err.go diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go new file mode 100644 index 000000000..37ea188f7 --- /dev/null +++ b/bundle/render/render_text_output.go @@ -0,0 +1,176 @@ +package render + +import ( + "fmt" + "io" + "path/filepath" + "strings" + "text/template" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/fatih/color" +) + +var renderFuncMap = template.FuncMap{ + "red": color.RedString, + "green": color.GreenString, + "blue": color.BlueString, + "yellow": color.YellowString, + "magenta": color.MagentaString, + "cyan": color.CyanString, + "bold": func(format string, a ...interface{}) string { + return color.New(color.Bold).Sprintf(format, a...) + }, + "italic": func(format string, a ...interface{}) string { + return color.New(color.Italic).Sprintf(format, a...) + }, +} + +const errorTemplate = `{{ "Error" | red }}: {{ .Summary }} +{{- if .Path.String }} + {{ "at " }}{{ .Path.String | green }} +{{- end }} +{{- if .Location.File }} + {{ "in " }}{{ .Location.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} +{{- if .Path.String }} + {{ "at " }}{{ .Path.String | green }} +{{- end }} +{{- if .Location.File }} + {{ "in " }}{{ .Location.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +const summaryTemplate = `{{- if .Name -}} +Name: {{ .Name | bold }} +{{- if .Target }} +Target: {{ .Target | bold }} +{{- end }} +{{- if or .User .Host .Path }} +Workspace: +{{- if .Host }} + Host: {{ .Host | bold }} +{{- end }} +{{- if .User }} + User: {{ .User | bold }} +{{- end }} +{{- if .Path }} + Path: {{ .Path | bold }} +{{- end }} +{{- end }} + +{{ end -}} + +{{ .Trailer }} +` + +func pluralize(n int, singular, plural string) string { + if n == 1 { + return fmt.Sprintf("%d %s", n, singular) + } + return fmt.Sprintf("%d %s", n, plural) +} + +func buildTrailer(diags diag.Diagnostics) string { + parts := []string{} + if errors := len(diags.Filter(diag.Error)); errors > 0 { + parts = append(parts, color.RedString(pluralize(errors, "error", "errors"))) + } + if warnings := len(diags.Filter(diag.Warning)); warnings > 0 { + parts = append(parts, color.YellowString(pluralize(warnings, "warning", "warnings"))) + } + if len(parts) > 0 { + return fmt.Sprintf("Found %s", strings.Join(parts, " and ")) + } else { + return color.GreenString("Validation OK!") + } +} + +func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { + if b == nil { + return renderSummaryTemplate(out, &bundle.Bundle{}, diags) + } + + var currentUser = &iam.User{} + + if b.Config.Workspace.CurrentUser != nil { + if b.Config.Workspace.CurrentUser.User != nil { + currentUser = b.Config.Workspace.CurrentUser.User + } + } + + t := 
template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryTemplate)) + err := t.Execute(out, map[string]any{ + "Name": b.Config.Bundle.Name, + "Target": b.Config.Bundle.Target, + "User": currentUser.UserName, + "Path": b.Config.Workspace.RootPath, + "Host": b.Config.Workspace.Host, + "Trailer": buildTrailer(diags), + }) + + return err +} + +func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { + errorT := template.Must(template.New("error").Funcs(renderFuncMap).Parse(errorTemplate)) + warningT := template.Must(template.New("warning").Funcs(renderFuncMap).Parse(warningTemplate)) + + // Print errors and warnings. + for _, d := range diags { + var t *template.Template + switch d.Severity { + case diag.Error: + t = errorT + case diag.Warning: + t = warningT + } + + // Make file relative to bundle root + if d.Location.File != "" { + out, err := filepath.Rel(b.RootPath, d.Location.File) + // if we can't relativize the path, just use path as-is + if err == nil { + d.Location.File = out + } + } + + // Render the diagnostic with the appropriate template. + err := t.Execute(out, d) + if err != nil { + return fmt.Errorf("failed to render template: %w", err) + } + } + + return nil +} + +// RenderTextOutput renders the diagnostics in a human-readable format. +func RenderTextOutput(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { + err := renderDiagnostics(out, b, diags) + if err != nil { + return fmt.Errorf("failed to render diagnostics: %w", err) + } + + err = renderSummaryTemplate(out, b, diags) + if err != nil { + return fmt.Errorf("failed to render summary: %w", err) + } + + return nil +} diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go new file mode 100644 index 000000000..4ae86ded7 --- /dev/null +++ b/bundle/render/render_text_output_test.go @@ -0,0 +1,258 @@ +package render + +import ( + "bytes" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/require" +) + +type renderTestOutputTestCase struct { + name string + bundle *bundle.Bundle + diags diag.Diagnostics + expected string +} + +func TestRenderTextOutput(t *testing.T) { + loadingBundle := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "test-bundle", + Target: "test-target", + }, + }, + } + + testCases := []renderTestOutputTestCase{ + { + name: "nil bundle and 1 error", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + }, + }, + expected: "Error: failed to load xxx\n" + + "\n" + + "Found 1 error\n", + }, + { + name: "bundle during 'load' and 1 error", + bundle: loadingBundle, + diags: diag.Errorf("failed to load bundle"), + expected: "Error: failed to load bundle\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 1 error\n", + }, + { + name: "bundle during 'load' and 1 warning", + bundle: loadingBundle, + diags: diag.Warningf("failed to load bundle"), + expected: "Warning: failed to load bundle\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 1 warning\n", + }, + { + name: "bundle during 'load' and 2 warnings", + bundle: loadingBundle, + diags: diag.Warningf("warning (1)").Extend(diag.Warningf("warning (2)")), + expected: "Warning: 
warning (1)\n" + + "\n" + + "Warning: warning (2)\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 2 warnings\n", + }, + { + name: "bundle during 'load' and 2 errors, 1 warning with details", + bundle: loadingBundle, + diags: diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Error, + Summary: "error (1)", + Detail: "detail (1)", + Location: dyn.Location{ + File: "foo.py", + Line: 1, + Column: 1, + }, + }, + diag.Diagnostic{ + Severity: diag.Error, + Summary: "error (2)", + Detail: "detail (2)", + Location: dyn.Location{ + File: "foo.py", + Line: 2, + Column: 1, + }, + }, + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "warning (3)", + Detail: "detail (3)", + Location: dyn.Location{ + File: "foo.py", + Line: 3, + Column: 1, + }, + }, + }, + expected: "Error: error (1)\n" + + " in foo.py:1:1\n" + + "\n" + + "detail (1)\n" + + "\n" + + "Error: error (2)\n" + + " in foo.py:2:1\n" + + "\n" + + "detail (2)\n" + + "\n" + + "Warning: warning (3)\n" + + " in foo.py:3:1\n" + + "\n" + + "detail (3)\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 2 errors and 1 warning\n", + }, + { + name: "bundle during 'init'", + bundle: &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "test-bundle", + Target: "test-target", + }, + Workspace: config.Workspace{ + Host: "https://localhost/", + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "test-user", + }, + }, + RootPath: "/Users/test-user@databricks.com/.bundle/examples/test-target", + }, + }, + }, + diags: nil, + expected: "Name: test-bundle\n" + + "Target: test-target\n" + + "Workspace:\n" + + " Host: https://localhost/\n" + + " User: test-user\n" + + " Path: /Users/test-user@databricks.com/.bundle/examples/test-target\n" + + "\n" + + "Validation OK!\n", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + writer := &bytes.Buffer{} + + err := RenderTextOutput(writer, tc.bundle, tc.diags) + require.NoError(t, err) + + assert.Equal(t, tc.expected, writer.String()) + }) + } +} + +type renderDiagnosticsTestCase struct { + name string + diags diag.Diagnostics + expected string +} + +func TestRenderDiagnostics(t *testing.T) { + bundle := &bundle.Bundle{} + + testCases := []renderDiagnosticsTestCase{ + { + name: "empty diagnostics", + diags: diag.Diagnostics{}, + expected: "", + }, + { + name: "error with short summary", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + }, + }, + expected: "Error: failed to load xxx\n\n", + }, + { + name: "error with source location", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + Detail: "'name' is required", + Location: dyn.Location{ + File: "foo.yaml", + Line: 1, + Column: 2, + }, + }, + }, + expected: "Error: failed to load xxx\n" + + " in foo.yaml:1:2\n\n" + + "'name' is required\n\n", + }, + { + name: "error with path", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Detail: "'name' is required", + Summary: "failed to load xxx", + Path: dyn.MustPathFromString("resources.jobs.xxx"), + }, + }, + expected: "Error: failed to load xxx\n" + + " at resources.jobs.xxx\n" + + "\n" + + "'name' is required\n\n", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + writer := &bytes.Buffer{} + + err := renderDiagnostics(writer, bundle, tc.diags) + require.NoError(t, err) + + assert.Equal(t, tc.expected, writer.String()) + }) + } +} + +func 
TestRenderSummaryTemplate_nilBundle(t *testing.T) { + writer := &bytes.Buffer{} + + err := renderSummaryTemplate(writer, nil, nil) + require.NoError(t, err) + + assert.Equal(t, "Validation OK!\n", writer.String()) +} diff --git a/bundle/tests/suggest_target_test.go b/bundle/tests/suggest_target_test.go index 924d6a4e1..8fb130409 100644 --- a/bundle/tests/suggest_target_test.go +++ b/bundle/tests/suggest_target_test.go @@ -4,14 +4,19 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/cmd/root" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/cli/internal" - "github.com/stretchr/testify/require" ) func TestSuggestTargetIfWrongPassed(t *testing.T) { t.Setenv("BUNDLE_ROOT", filepath.Join("target_overrides", "workspace")) - _, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") - require.ErrorContains(t, err, "Available targets:") - require.ErrorContains(t, err, "development") - require.ErrorContains(t, err, "staging") + stdoutBytes, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") + stdout := stdoutBytes.String() + + assert.Error(t, root.ErrAlreadyPrinted, err) + assert.Contains(t, stdout, "Available targets:") + assert.Contains(t, stdout, "development") + assert.Contains(t, stdout, "staging") } diff --git a/cmd/bundle/utils/utils.go b/cmd/bundle/utils/utils.go index d585c6220..ce3774cf5 100644 --- a/cmd/bundle/utils/utils.go +++ b/cmd/bundle/utils/utils.go @@ -20,19 +20,16 @@ func ConfigureBundleWithVariables(cmd *cobra.Command) (*bundle.Bundle, diag.Diag // Load bundle config and apply target b, diags := root.MustConfigureBundle(cmd) if diags.HasError() { - return nil, diags + return b, diags } variables, err := cmd.Flags().GetStringSlice("var") if err != nil { - return nil, diag.FromErr(err) + return b, diag.FromErr(err) } // Initialize variables by assigning them values passed as command line flags diags = diags.Extend(configureVariables(cmd, b, variables)) - if diags.HasError() { - return nil, diags - } return b, diags } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index a1f8d2681..59a977047 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -3,121 +3,18 @@ package bundle import ( "encoding/json" "fmt" - "path/filepath" - "strings" - "text/template" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/validate" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/render" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/flags" - "github.com/fatih/color" "github.com/spf13/cobra" ) -var validateFuncMap = template.FuncMap{ - "red": color.RedString, - "green": color.GreenString, - "blue": color.BlueString, - "yellow": color.YellowString, - "magenta": color.MagentaString, - "cyan": color.CyanString, - "bold": func(format string, a ...interface{}) string { - return color.New(color.Bold).Sprintf(format, a...) - }, - "italic": func(format string, a ...interface{}) string { - return color.New(color.Italic).Sprintf(format, a...) 
- }, -} - -const errorTemplate = `{{ "Error" | red }}: {{ .Summary }} - {{ "at " }}{{ .Path.String | green }} - {{ "in " }}{{ .Location.String | cyan }} - -` - -const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} - {{ "at " }}{{ .Path.String | green }} - {{ "in " }}{{ .Location.String | cyan }} - -` - -const summaryTemplate = `Name: {{ .Config.Bundle.Name | bold }} -Target: {{ .Config.Bundle.Target | bold }} -Workspace: - Host: {{ .WorkspaceClient.Config.Host | bold }} - User: {{ .Config.Workspace.CurrentUser.UserName | bold }} - Path: {{ .Config.Workspace.RootPath | bold }} - -{{ .Trailer }} -` - -func pluralize(n int, singular, plural string) string { - if n == 1 { - return fmt.Sprintf("%d %s", n, singular) - } - return fmt.Sprintf("%d %s", n, plural) -} - -func buildTrailer(diags diag.Diagnostics) string { - parts := []string{} - if errors := len(diags.Filter(diag.Error)); errors > 0 { - parts = append(parts, color.RedString(pluralize(errors, "error", "errors"))) - } - if warnings := len(diags.Filter(diag.Warning)); warnings > 0 { - parts = append(parts, color.YellowString(pluralize(warnings, "warning", "warnings"))) - } - if len(parts) > 0 { - return fmt.Sprintf("Found %s", strings.Join(parts, " and ")) - } else { - return color.GreenString("Validation OK!") - } -} - -func renderTextOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { - errorT := template.Must(template.New("error").Funcs(validateFuncMap).Parse(errorTemplate)) - warningT := template.Must(template.New("warning").Funcs(validateFuncMap).Parse(warningTemplate)) - - // Print errors and warnings. - for _, d := range diags { - var t *template.Template - switch d.Severity { - case diag.Error: - t = errorT - case diag.Warning: - t = warningT - } - - // Make file relative to bundle root - if d.Location.File != "" { - out, _ := filepath.Rel(b.RootPath, d.Location.File) - d.Location.File = out - } - - // Render the diagnostic with the appropriate template. - err := t.Execute(cmd.OutOrStdout(), d) - if err != nil { - return err - } - } - - // Print validation summary. 
- t := template.Must(template.New("summary").Funcs(validateFuncMap).Parse(summaryTemplate)) - err := t.Execute(cmd.OutOrStdout(), map[string]any{ - "Config": b.Config, - "Trailer": buildTrailer(diags), - "WorkspaceClient": b.WorkspaceClient(), - }) - if err != nil { - return err - } - - return diags.Error() -} - func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { buf, err := json.MarshalIndent(b.Config.Value().AsAny(), "", " ") if err != nil { @@ -137,19 +34,35 @@ func newValidateCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() b, diags := utils.ConfigureBundleWithVariables(cmd) - if err := diags.Error(); err != nil { - return diags.Error() + + if b == nil { + if err := diags.Error(); err != nil { + return diags.Error() + } else { + return fmt.Errorf("invariant failed: returned bundle is nil") + } } - diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize())) - diags = diags.Extend(bundle.Apply(ctx, b, validate.Validate())) - if err := diags.Error(); err != nil { - return err + if !diags.HasError() { + diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize())) + } + + if !diags.HasError() { + diags = diags.Extend(bundle.Apply(ctx, b, validate.Validate())) } switch root.OutputType(cmd) { case flags.OutputText: - return renderTextOutput(cmd, b, diags) + err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags) + if err != nil { + return fmt.Errorf("failed to render output: %w", err) + } + + if diags.HasError() { + return root.ErrAlreadyPrinted + } + + return nil case flags.OutputJSON: return renderJsonOutput(cmd, b, diags) default: diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index 4ed89c57b..8b98f2cf2 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -76,15 +76,11 @@ func configureBundle(cmd *cobra.Command, b *bundle.Bundle) (*bundle.Bundle, diag ctx := cmd.Context() diags := bundle.Apply(ctx, b, m) if diags.HasError() { - return nil, diags + return b, diags } // Configure the workspace profile if the flag has been set. diags = diags.Extend(configureProfile(cmd, b)) - if diags.HasError() { - return nil, diags - } - return b, diags } diff --git a/cmd/root/root.go b/cmd/root/root.go index 38eb42ccb..91e91d368 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -2,6 +2,7 @@ package root import ( "context" + "errors" "fmt" "os" "strings" @@ -97,7 +98,7 @@ func Execute(cmd *cobra.Command) { // Run the command cmd, err := cmd.ExecuteContextC(ctx) - if err != nil { + if err != nil && !errors.Is(err, ErrAlreadyPrinted) { // If cmdio logger initialization succeeds, then this function logs with the // initialized cmdio logger, otherwise with the default cmdio logger cmdio.LogError(cmd.Context(), err) diff --git a/cmd/root/silent_err.go b/cmd/root/silent_err.go new file mode 100644 index 000000000..b361cc6b4 --- /dev/null +++ b/cmd/root/silent_err.go @@ -0,0 +1,7 @@ +package root + +import "errors" + +// ErrAlreadyPrinted is not printed to the user. It's used to signal that the command should exit with an error, +// but the error message was already printed. +var ErrAlreadyPrinted = errors.New("AlreadyPrinted") From da603c6ead648f1ca624cef419b6b4db049ff9f7 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 1 Jul 2024 15:00:31 +0200 Subject: [PATCH 255/286] Ignore `dyn.NilValue` when traversing value from `dyn.Map` (#1547) ## Changes The map function ignores cases where either a key in a map is not present or an index in a sequence is out of bounds.
As of recently, we retain nil values as valid values in a configuration tree. As such, it makes sense to also ignore cases where a map or sequence is expected but nil is found. This is semantically no different from an empty map where a key is not found. Without this fix, all calls to `dyn.Map` would need to be updated with nil-checks at every path component. Related PRs: * #1507 * #1511 ## Tests Unit tests pass. --- libs/dyn/visit.go | 42 ++++++++++++++++++++++++++++++++++---- libs/dyn/visit_map.go | 9 ++++++-- libs/dyn/visit_map_test.go | 22 ++++++++++++++++---- 3 files changed, 63 insertions(+), 10 deletions(-) diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index 3fe356194..4d3cf5014 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -6,6 +6,28 @@ import ( "slices" ) +// This error is returned if the path indicates that a map or sequence is expected, but the value is nil. +type cannotTraverseNilError struct { + p Path +} + +func (e cannotTraverseNilError) Error() string { + component := e.p[len(e.p)-1] + switch { + case component.isKey(): + return fmt.Sprintf("expected a map to index %q, found nil", e.p) + case component.isIndex(): + return fmt.Sprintf("expected a sequence to index %q, found nil", e.p) + default: + panic("invalid component") + } +} + +func IsCannotTraverseNilError(err error) bool { + var target cannotTraverseNilError + return errors.As(err, &target) +} + type noSuchKeyError struct { p Path } @@ -70,11 +92,17 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts switch { case component.isKey(): // Expect a map to be set if this is a key. - m, ok := v.AsMap() - if !ok { + switch v.Kind() { + case KindMap: + // OK + case KindNil: + return InvalidValue, cannotTraverseNilError{path} + default: return InvalidValue, fmt.Errorf("expected a map to index %q, found %s", path, v.Kind()) } + m := v.MustMap() + // Lookup current value in the map. ev, ok := m.GetByString(component.key) if !ok { @@ -103,11 +131,17 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts case component.isIndex(): // Expect a sequence to be set if this is an index. - s, ok := v.AsSequence() - if !ok { + switch v.Kind() { + case KindSequence: + // OK + case KindNil: + return InvalidValue, cannotTraverseNilError{path} + default: return InvalidValue, fmt.Errorf("expected a sequence to index %q, found %s", path, v.Kind()) } + s := v.MustSequence() + // Lookup current value in the sequence. if component.index < 0 || component.index >= len(s) { return InvalidValue, indexOutOfBoundsError{path} diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index f5cfea311..56a9cf9f3 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -10,9 +10,12 @@ type MapFunc func(Path, Value) (Value, error) // Foreach returns a [MapFunc] that applies the specified [MapFunc] to each // value in a map or sequence and returns the new map or sequence. +// If the input is nil, it returns nil. func Foreach(fn MapFunc) MapFunc { return func(p Path, v Value) (Value, error) { switch v.Kind() { + case KindNil: + return v, nil case KindMap: m := v.MustMap().Clone() for _, pair := range m.Pairs() { @@ -75,8 +78,10 @@ func MapByPattern(v Value, p Pattern, fn MapFunc) (Value, error) { return nv, nil } - // Return original value if a key or index is missing. 
- if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { + // Return original value if: + // - any map or sequence is a nil, or + // - a key or index is missing + if IsCannotTraverseNilError(err) || IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { return v, nil } diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index df6bad496..2cea0913b 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -20,11 +20,14 @@ func TestMapWithEmptyPath(t *testing.T) { } func TestMapOnNilValue(t *testing.T) { + var nv dyn.Value var err error - _, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Key("foo")), nil) - assert.ErrorContains(t, err, `expected a map to index "foo", found nil`) - _, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Index(42)), nil) - assert.ErrorContains(t, err, `expected a sequence to index "[42]", found nil`) + nv, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Key("foo")), nil) + assert.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + nv, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Index(42)), nil) + assert.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) } func TestMapFuncOnMap(t *testing.T) { @@ -269,6 +272,17 @@ func TestMapForeachOnOtherError(t *testing.T) { assert.ErrorContains(t, err, "expected a map or sequence, found int") } +func TestMapForeachOnNil(t *testing.T) { + vin := dyn.NilValue + + // Check that if foreach is applied to nil, it returns nil. + vout, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return dyn.InvalidValue, nil + })) + assert.NoError(t, err) + assert.Equal(t, dyn.NilValue, vout) +} + func TestMapByPatternOnNilValue(t *testing.T) { var err error _, err = dyn.MapByPattern(dyn.NilValue, dyn.NewPattern(dyn.AnyKey()), nil) From a0df54ac4131a4eb6c91e2a5da153c22cdd256e2 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 1 Jul 2024 15:08:50 +0200 Subject: [PATCH 256/286] Add extra tests for the sync block (#1548) ## Changes Issue #1545 describes how a nil entry in the sync block caused an error. The fix for this issue is in #1547. This change adds end-to-end test coverage. ## Tests New test passes on top of #1547. 
--- bundle/tests/override_sync_test.go | 41 ------------ bundle/tests/sync/nil/databricks.yml | 19 ++++++ bundle/tests/sync/nil_root/databricks.yml | 17 +++++ .../override}/databricks.yml | 2 +- .../override_no_root}/databricks.yml | 2 +- .../sync_include_exclude_no_matches_test.go | 4 +- bundle/tests/sync_test.go | 65 +++++++++++++++++++ 7 files changed, 105 insertions(+), 45 deletions(-) delete mode 100644 bundle/tests/override_sync_test.go create mode 100644 bundle/tests/sync/nil/databricks.yml create mode 100644 bundle/tests/sync/nil_root/databricks.yml rename bundle/tests/{override_sync => sync/override}/databricks.yml (93%) rename bundle/tests/{override_sync_no_root => sync/override_no_root}/databricks.yml (90%) create mode 100644 bundle/tests/sync_test.go diff --git a/bundle/tests/override_sync_test.go b/bundle/tests/override_sync_test.go deleted file mode 100644 index 64f28e377..000000000 --- a/bundle/tests/override_sync_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package config_tests - -import ( - "path/filepath" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/stretchr/testify/assert" -) - -func TestOverrideSyncTarget(t *testing.T) { - var b *bundle.Bundle - - b = loadTarget(t, "./override_sync", "development") - assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("tests/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync", "staging") - assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync", "prod") - assert.ElementsMatch(t, []string{filepath.FromSlash("src/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) -} - -func TestOverrideSyncTargetNoRootSync(t *testing.T) { - var b *bundle.Bundle - - b = loadTarget(t, "./override_sync_no_root", "development") - assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync_no_root", "staging") - assert.ElementsMatch(t, []string{filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync_no_root", "prod") - assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) -} diff --git a/bundle/tests/sync/nil/databricks.yml b/bundle/tests/sync/nil/databricks.yml new file mode 100644 index 000000000..a8b4b901e --- /dev/null +++ b/bundle/tests/sync/nil/databricks.yml @@ -0,0 +1,19 @@ +bundle: + name: sync_nil + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: + include: ~ + exclude: ~ + +targets: + development: + + staging: + sync: + include: + - tests/* + exclude: + - dist diff --git a/bundle/tests/sync/nil_root/databricks.yml b/bundle/tests/sync/nil_root/databricks.yml new file mode 100644 index 000000000..44e6c48ea --- /dev/null +++ b/bundle/tests/sync/nil_root/databricks.yml @@ -0,0 +1,17 @@ +bundle: + name: sync_nil_root + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: ~ + +targets: + development: + + staging: + sync: + include: + - tests/* + exclude: + - dist diff --git a/bundle/tests/override_sync/databricks.yml b/bundle/tests/sync/override/databricks.yml 
similarity index 93% rename from bundle/tests/override_sync/databricks.yml rename to bundle/tests/sync/override/databricks.yml index 1417b8644..8bb0e1def 100644 --- a/bundle/tests/override_sync/databricks.yml +++ b/bundle/tests/sync/override/databricks.yml @@ -1,5 +1,5 @@ bundle: - name: override_sync + name: sync_override workspace: host: https://acme.cloud.databricks.com/ diff --git a/bundle/tests/override_sync_no_root/databricks.yml b/bundle/tests/sync/override_no_root/databricks.yml similarity index 90% rename from bundle/tests/override_sync_no_root/databricks.yml rename to bundle/tests/sync/override_no_root/databricks.yml index 109d8da1f..bd1bfe8e0 100644 --- a/bundle/tests/override_sync_no_root/databricks.yml +++ b/bundle/tests/sync/override_no_root/databricks.yml @@ -1,5 +1,5 @@ bundle: - name: override_sync + name: sync_override_no_root workspace: host: https://acme.cloud.databricks.com/ diff --git a/bundle/tests/sync_include_exclude_no_matches_test.go b/bundle/tests/sync_include_exclude_no_matches_test.go index 135e2faac..94cedbaa6 100644 --- a/bundle/tests/sync_include_exclude_no_matches_test.go +++ b/bundle/tests/sync_include_exclude_no_matches_test.go @@ -13,7 +13,7 @@ import ( ) func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) { - b := loadTarget(t, "./override_sync", "development") + b := loadTarget(t, "./sync/override", "development") diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.ValidateSyncPatterns()) require.Len(t, diags, 3) @@ -21,7 +21,7 @@ func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) { require.Equal(t, diags[0].Severity, diag.Warning) require.Equal(t, diags[0].Summary, "Pattern dist does not match any files") - require.Equal(t, diags[0].Location.File, filepath.Join("override_sync", "databricks.yml")) + require.Equal(t, diags[0].Location.File, filepath.Join("sync", "override", "databricks.yml")) require.Equal(t, diags[0].Location.Line, 17) require.Equal(t, diags[0].Location.Column, 11) require.Equal(t, diags[0].Path.String(), "sync.exclude[0]") diff --git a/bundle/tests/sync_test.go b/bundle/tests/sync_test.go new file mode 100644 index 000000000..d08e889c3 --- /dev/null +++ b/bundle/tests/sync_test.go @@ -0,0 +1,65 @@ +package config_tests + +import ( + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/stretchr/testify/assert" +) + +func TestSyncOverride(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/override", "development") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override", "prod") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) +} + +func TestSyncOverrideNoRootSync(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/override_no_root", "development") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override_no_root", "staging") + assert.ElementsMatch(t, 
[]string{filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override_no_root", "prod") + assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) +} + +func TestSyncNil(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/nil", "development") + assert.Nil(t, b.Config.Sync.Include) + assert.Nil(t, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/nil", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) +} + +func TestSyncNilRoot(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/nil_root", "development") + assert.Nil(t, b.Config.Sync.Include) + assert.Nil(t, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/nil_root", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) +} From 0d64975d36ef7dd9023953c9dc9560ff201541b1 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 2 Jul 2024 13:45:16 +0200 Subject: [PATCH 257/286] Fixed resolving variable references inside slice variable (#1550) ## Changes Fixes #1541 ## Tests Added regression unit test --------- Co-authored-by: Pieter Noordhuis --- .../mutator/resolve_variable_references.go | 8 ++- .../resolve_variable_references_test.go | 64 +++++++++++++++++++ 2 files changed, 70 insertions(+), 2 deletions(-) diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index cddc85cba..61940be56 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -75,8 +75,12 @@ func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { } func skipResolvingInNonComplexVariables(v dyn.Value) bool { - _, ok := v.AsMap() - return !ok + switch v.Kind() { + case dyn.KindMap, dyn.KindSequence: + return false + default: + return true + } } func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go index 2b88a2495..7bb6f11a0 100644 --- a/bundle/config/mutator/resolve_variable_references_test.go +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -370,3 +370,67 @@ func TestResolveComplexVariableReferencesWithComplexVariablesError(t *testing.T) diags = bundle.Apply(ctx, b, bundle.Seq(ResolveVariableReferencesInComplexVariables(), ResolveVariableReferences("bundle", "workspace", "variables"))) require.ErrorContains(t, diags.Error(), "complex variables cannot contain references to another complex variables") } + +func TestResolveComplexVariableWithVarReference(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "package_version": { + Value: "1.0.0", + }, + "cluster_libraries": { + Value: [](map[string]any){ + { + "pypi": map[string]string{ + "package": "cicd_template==${var.package_version}", + }, + }, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + 
Libraries: []compute.Library{}, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. + diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.tasks[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("libraries")), dyn.V("${var.cluster_libraries}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, bundle.Seq( + ResolveVariableReferencesInComplexVariables(), + ResolveVariableReferences("bundle", "workspace", "variables"), + )) + require.NoError(t, diags.Error()) + require.Equal(t, "cicd_template==1.0.0", b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0].Pypi.Package) +} From 3d2f7622bc89a7146914888295708a736c3bcb28 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 2 Jul 2024 14:40:39 +0200 Subject: [PATCH 258/286] Fixed bundle not loading when empty variable is defined (#1552) ## Changes Fixes #1544 ## Tests Added regression test --- bundle/tests/variables/empty/databricks.yml | 7 +++++++ bundle/tests/variables_test.go | 6 ++++++ libs/dyn/convert/to_typed.go | 5 +++++ libs/dyn/convert/to_typed_test.go | 7 +++++++ 4 files changed, 25 insertions(+) create mode 100644 bundle/tests/variables/empty/databricks.yml diff --git a/bundle/tests/variables/empty/databricks.yml b/bundle/tests/variables/empty/databricks.yml new file mode 100644 index 000000000..f90f6211c --- /dev/null +++ b/bundle/tests/variables/empty/databricks.yml @@ -0,0 +1,7 @@ +variables: + a: + description: empty variable + default: + +bundle: + name: empty${var.a} diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 7cf0f72f0..51a23e5d5 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -193,3 +193,9 @@ func TestVariableTargetOverrides(t *testing.T) { }) } } + +func TestBundleWithEmptyVariableLoads(t *testing.T) { + b := load(t, "./variables/empty") + diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) + require.ErrorContains(t, diags.Error(), "no value assigned to required variable a") +} diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index 8febe87ae..181c88cc9 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -282,6 +282,11 @@ func toTypedFloat(dst reflect.Value, src dyn.Value) error { } func toTypedInterface(dst reflect.Value, src dyn.Value) error { + if src.Kind() == dyn.KindNil { + dst.Set(reflect.Zero(dst.Type())) + return nil + } + dst.Set(reflect.ValueOf(src.AsAny())) return nil } diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 5e37f2863..37d85539c 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -533,3 +533,10 @@ func TestToTypedAnyWithMap(t *testing.T) { require.NoError(t, err) assert.Equal(t, map[string]any{"foo": "bar", "bar": "baz"}, out) } + +func TestToTypedAnyWithNil(t *testing.T) { + var out any + err := ToTyped(&out, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, nil, out) +} From 5a0a6d73345539d2719e580a4634dd8ed9326079 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Tue, 2 Jul 2024 17:10:53 +0200 Subject: [PATCH 259/286] PythonMutator: add diagnostics (#1531) ## Changes Allow PyDABs to report 
`dyn.Diagnostics` by writing to `diagnostics.json` supplied as an argument, similar to `input.json` and `output.json` Such errors are not yet properly printed in `databricks bundle validate`, which will be fixed in a follow-up PR. ## Tests Unit tests --- .../mutator/python/python_diagnostics.go | 97 ++++++++++++++ .../mutator/python/python_diagnostics_test.go | 107 ++++++++++++++++ .../config/mutator/python/python_mutator.go | 121 ++++++++++++++---- .../mutator/python/python_mutator_test.go | 44 +++++-- 4 files changed, 334 insertions(+), 35 deletions(-) create mode 100644 bundle/config/mutator/python/python_diagnostics.go create mode 100644 bundle/config/mutator/python/python_diagnostics_test.go diff --git a/bundle/config/mutator/python/python_diagnostics.go b/bundle/config/mutator/python/python_diagnostics.go new file mode 100644 index 000000000..b8efc9ef7 --- /dev/null +++ b/bundle/config/mutator/python/python_diagnostics.go @@ -0,0 +1,97 @@ +package python + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type pythonDiagnostic struct { + Severity pythonSeverity `json:"severity"` + Summary string `json:"summary"` + Detail string `json:"detail,omitempty"` + Location pythonDiagnosticLocation `json:"location,omitempty"` + Path string `json:"path,omitempty"` +} + +type pythonDiagnosticLocation struct { + File string `json:"file"` + Line int `json:"line"` + Column int `json:"column"` +} + +type pythonSeverity = string + +const ( + pythonError pythonSeverity = "error" + pythonWarning pythonSeverity = "warning" +) + +// parsePythonDiagnostics parses diagnostics from the Python mutator. +// +// diagnostics file is newline-separated JSON objects with pythonDiagnostic structure. 
+func parsePythonDiagnostics(input io.Reader) (diag.Diagnostics, error) { + diags := diag.Diagnostics{} + decoder := json.NewDecoder(input) + + for decoder.More() { + var parsedLine pythonDiagnostic + + err := decoder.Decode(&parsedLine) + if err != nil { + return nil, fmt.Errorf("failed to parse diags: %s", err) + } + + severity, err := convertPythonSeverity(parsedLine.Severity) + if err != nil { + return nil, fmt.Errorf("failed to parse severity: %s", err) + } + + path, err := convertPythonPath(parsedLine.Path) + if err != nil { + return nil, fmt.Errorf("failed to parse path: %s", err) + } + + diag := diag.Diagnostic{ + Severity: severity, + Summary: parsedLine.Summary, + Detail: parsedLine.Detail, + Location: convertPythonLocation(parsedLine.Location), + Path: path, + } + + diags = diags.Append(diag) + } + + return diags, nil +} + +func convertPythonPath(path string) (dyn.Path, error) { + if path == "" { + return nil, nil + } + + return dyn.NewPathFromString(path) +} + +func convertPythonSeverity(severity pythonSeverity) (diag.Severity, error) { + switch severity { + case pythonError: + return diag.Error, nil + case pythonWarning: + return diag.Warning, nil + default: + return 0, fmt.Errorf("unexpected value: %s", severity) + } +} + +func convertPythonLocation(location pythonDiagnosticLocation) dyn.Location { + return dyn.Location{ + File: location.File, + Line: location.Line, + Column: location.Column, + } +} diff --git a/bundle/config/mutator/python/python_diagnostics_test.go b/bundle/config/mutator/python/python_diagnostics_test.go new file mode 100644 index 000000000..7b66e2537 --- /dev/null +++ b/bundle/config/mutator/python/python_diagnostics_test.go @@ -0,0 +1,107 @@ +package python + +import ( + "bytes" + "testing" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +func TestConvertPythonLocation(t *testing.T) { + location := convertPythonLocation(pythonDiagnosticLocation{ + File: "src/examples/file.py", + Line: 1, + Column: 2, + }) + + assert.Equal(t, dyn.Location{ + File: "src/examples/file.py", + Line: 1, + Column: 2, + }, location) +} + +type parsePythonDiagnosticsTest struct { + name string + input string + expected diag.Diagnostics +} + +func TestParsePythonDiagnostics(t *testing.T) { + + testCases := []parsePythonDiagnosticsTest{ + { + name: "short error with location", + input: `{"severity": "error", "summary": "error summary", "location": {"file": "src/examples/file.py", "line": 1, "column": 2}}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary", + Location: dyn.Location{ + File: "src/examples/file.py", + Line: 1, + Column: 2, + }, + }, + }, + }, + { + name: "short error with path", + input: `{"severity": "error", "summary": "error summary", "path": "resources.jobs.job0.name"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary", + Path: dyn.MustPathFromString("resources.jobs.job0.name"), + }, + }, + }, + { + name: "empty file", + input: "", + expected: diag.Diagnostics{}, + }, + { + name: "newline file", + input: "\n", + expected: diag.Diagnostics{}, + }, + { + name: "warning with detail", + input: `{"severity": "warning", "summary": "warning summary", "detail": "warning detail"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: "warning summary", + Detail: "warning detail", + }, + }, + }, + { + name: "multiple errors", + input: `{"severity": "error", "summary": "error summary (1)"}` + 
"\n" + + `{"severity": "error", "summary": "error summary (2)"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary (1)", + }, + { + Severity: diag.Error, + Summary: "error summary (2)", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + diags, err := parsePythonDiagnostics(bytes.NewReader([]byte(tc.input))) + + assert.NoError(t, err) + assert.Equal(t, tc.expected, diags) + }) + } +} diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go index 73ddf9529..bef69d9c9 100644 --- a/bundle/config/mutator/python/python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -3,11 +3,14 @@ package python import ( "context" "encoding/json" + "errors" "fmt" "os" "path/filepath" "runtime" + "github.com/databricks/databricks-sdk-go/logger" + "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/bundle" @@ -87,6 +90,10 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set") } + // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics' + var mutateDiags diag.Diagnostics + var mutateDiagsHasError = errors.New("unexpected error") + err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { pythonPath := interpreterPath(experimental.PyDABs.VEnvPath) @@ -103,9 +110,10 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err) } - rightRoot, err := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot) - if err != nil { - return dyn.InvalidValue, err + rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot) + mutateDiags = diags + if diags.HasError() { + return dyn.InvalidValue, mutateDiagsHasError } visitor, err := createOverrideVisitor(ctx, m.phase) @@ -116,7 +124,15 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno return merge.Override(leftRoot, rightRoot, visitor) }) - return diag.FromErr(err) + if err == mutateDiagsHasError { + if !mutateDiags.HasError() { + panic("mutateDiags has no error, but error is expected") + } + + return mutateDiags + } + + return mutateDiags.Extend(diag.FromErr(err)) } func createCacheDir(ctx context.Context) (string, error) { @@ -138,9 +154,10 @@ func createCacheDir(ctx context.Context) (string, error) { return os.MkdirTemp("", "-pydabs") } -func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, error) { +func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { inputPath := filepath.Join(cacheDir, "input.json") outputPath := filepath.Join(cacheDir, "output.json") + diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json") args := []string{ pythonPath, @@ -152,42 +169,77 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r inputPath, "--output", outputPath, + "--diagnostics", + diagnosticsPath, } - // we need to marshal dyn.Value instead of bundle.Config to JSON to support - // non-string fields assigned with bundle variables - rootConfigJson, err := json.Marshal(root.AsAny()) - if err != nil { - return dyn.InvalidValue, fmt.Errorf("failed to 
marshal root config: %w", err) - } - - err = os.WriteFile(inputPath, rootConfigJson, 0600) - if err != nil { - return dyn.InvalidValue, fmt.Errorf("failed to write input file: %w", err) + if err := writeInputFile(inputPath, root); err != nil { + return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err) } stderrWriter := newLogWriter(ctx, "stderr: ") stdoutWriter := newLogWriter(ctx, "stdout: ") - _, err = process.Background( + _, processErr := process.Background( ctx, args, process.WithDir(rootPath), process.WithStderrWriter(stderrWriter), process.WithStdoutWriter(stdoutWriter), ) - if err != nil { - return dyn.InvalidValue, fmt.Errorf("python mutator process failed: %w", err) + if processErr != nil { + logger.Debugf(ctx, "python mutator process failed: %s", processErr) } + pythonDiagnostics, pythonDiagnosticsErr := loadDiagnosticsFile(diagnosticsPath) + if pythonDiagnosticsErr != nil { + logger.Debugf(ctx, "failed to load diagnostics: %s", pythonDiagnosticsErr) + } + + // if diagnostics file exists, it gives the most descriptive errors + // if there is any error, we treat it as fatal error, and stop processing + if pythonDiagnostics.HasError() { + return dyn.InvalidValue, pythonDiagnostics + } + + // process can fail without reporting errors in diagnostics file or creating it, for instance, + // venv doesn't have PyDABs library installed + if processErr != nil { + return dyn.InvalidValue, diag.Errorf("python mutator process failed: %sw, use --debug to enable logging", processErr) + } + + // or we can fail to read diagnostics file, that should always be created + if pythonDiagnosticsErr != nil { + return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr) + } + + output, err := loadOutputFile(rootPath, outputPath) + if err != nil { + return dyn.InvalidValue, diag.Errorf("failed to load Python mutator output: %s", err) + } + + // we pass through pythonDiagnostic because it contains warnings + return output, pythonDiagnostics +} + +func writeInputFile(inputPath string, input dyn.Value) error { + // we need to marshal dyn.Value instead of bundle.Config to JSON to support + // non-string fields assigned with bundle variables + rootConfigJson, err := json.Marshal(input.AsAny()) + if err != nil { + return fmt.Errorf("failed to marshal input: %w", err) + } + + return os.WriteFile(inputPath, rootConfigJson, 0600) +} + +func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) { outputFile, err := os.Open(outputPath) if err != nil { - return dyn.InvalidValue, fmt.Errorf("failed to open Python mutator output: %w", err) + return dyn.InvalidValue, fmt.Errorf("failed to open output file: %w", err) } - defer func() { - _ = outputFile.Close() - }() + defer outputFile.Close() // we need absolute path because later parts of pipeline assume all paths are absolute // and this file will be used as location to resolve relative paths. 
@@ -204,24 +256,43 @@ func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, r generated, err := yamlloader.LoadYAML(virtualPath, outputFile) if err != nil { - return dyn.InvalidValue, fmt.Errorf("failed to parse Python mutator output: %w", err) + return dyn.InvalidValue, fmt.Errorf("failed to parse output file: %w", err) } normalized, diagnostic := convert.Normalize(config.Root{}, generated) if diagnostic.Error() != nil { - return dyn.InvalidValue, fmt.Errorf("failed to normalize Python mutator output: %w", diagnostic.Error()) + return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %w", diagnostic.Error()) } // warnings shouldn't happen because output should be already normalized // when it happens, it's a bug in the mutator, and should be treated as an error for _, d := range diagnostic.Filter(diag.Warning) { - return dyn.InvalidValue, fmt.Errorf("failed to normalize Python mutator output: %s", d.Summary) + return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %s", d.Summary) } return normalized, nil } +// loadDiagnosticsFile loads diagnostics from a file. +// +// It contains a list of warnings and errors that we should print to users. +// +// If the file doesn't exist, we return an error. We expect the file to always be +// created by the Python mutator, and it's absence means there are integration problems, +// and the diagnostics file was lost. If we treat non-existence as an empty diag.Diagnostics +// we risk loosing errors and warnings. +func loadDiagnosticsFile(path string) (diag.Diagnostics, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open diagnostics file: %w", err) + } + + defer file.Close() + + return parsePythonDiagnostics(file) +} + func createOverrideVisitor(ctx context.Context, phase phase) (merge.OverrideVisitor, error) { switch phase { case PythonMutatorPhaseLoad: diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index e2c20386a..24e5ad60f 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -74,12 +74,14 @@ func TestPythonMutator_load(t *testing.T) { }, } } - }`) + }`, + `{"severity": "warning", "summary": "job doesn't have any tasks", "location": {"file": "src/examples/file.py", "line": 10, "column": 5}}`, + ) mutator := PythonMutator(PythonMutatorPhaseLoad) - diag := bundle.Apply(ctx, b, mutator) + diags := bundle.Apply(ctx, b, mutator) - assert.NoError(t, diag.Error()) + assert.NoError(t, diags.Error()) assert.ElementsMatch(t, []string{"job0", "job1"}, maps.Keys(b.Config.Resources.Jobs)) @@ -90,6 +92,14 @@ func TestPythonMutator_load(t *testing.T) { if job1, ok := b.Config.Resources.Jobs["job1"]; ok { assert.Equal(t, "job_1", job1.Name) } + + assert.Equal(t, 1, len(diags)) + assert.Equal(t, "job doesn't have any tasks", diags[0].Summary) + assert.Equal(t, dyn.Location{ + File: "src/examples/file.py", + Line: 10, + Column: 5, + }, diags[0].Location) } func TestPythonMutator_load_disallowed(t *testing.T) { @@ -129,7 +139,7 @@ func TestPythonMutator_load_disallowed(t *testing.T) { } } } - }`) + }`, "") mutator := PythonMutator(PythonMutatorPhaseLoad) diag := bundle.Apply(ctx, b, mutator) @@ -174,7 +184,7 @@ func TestPythonMutator_init(t *testing.T) { } } } - }`) + }`, "") mutator := PythonMutator(PythonMutatorPhaseInit) diag := bundle.Apply(ctx, b, mutator) @@ -235,12 +245,12 @@ func TestPythonMutator_badOutput(t *testing.T) { } } } - }`) 
+ }`, "") mutator := PythonMutator(PythonMutatorPhaseLoad) diag := bundle.Apply(ctx, b, mutator) - assert.EqualError(t, diag.Error(), "failed to normalize Python mutator output: unknown field: unknown_property") + assert.EqualError(t, diag.Error(), "failed to load Python mutator output: failed to normalize output: unknown field: unknown_property") } func TestPythonMutator_disabled(t *testing.T) { @@ -409,6 +419,13 @@ func TestCreateOverrideVisitor(t *testing.T) { } } +func TestLoadDiagnosticsFile_nonExistent(t *testing.T) { + // this is an important behaviour, see loadDiagnosticsFile docstring + _, err := loadDiagnosticsFile("non_existent_file.json") + + assert.Error(t, err) +} + func TestInterpreterPath(t *testing.T) { if runtime.GOOS == "windows" { assert.Equal(t, "venv\\Scripts\\python3.exe", interpreterPath("venv")) @@ -417,7 +434,7 @@ func TestInterpreterPath(t *testing.T) { } } -func withProcessStub(t *testing.T, args []string, stdout string) context.Context { +func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context { ctx := context.Background() ctx, stub := process.WithStub(ctx) @@ -429,17 +446,24 @@ func withProcessStub(t *testing.T, args []string, stdout string) context.Context inputPath := filepath.Join(cacheDir, "input.json") outputPath := filepath.Join(cacheDir, "output.json") + diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json") args = append(args, "--input", inputPath) args = append(args, "--output", outputPath) + args = append(args, "--diagnostics", diagnosticsPath) stub.WithCallback(func(actual *exec.Cmd) error { _, err := os.Stat(inputPath) assert.NoError(t, err) if reflect.DeepEqual(actual.Args, args) { - err := os.WriteFile(outputPath, []byte(stdout), 0600) - return err + err := os.WriteFile(outputPath, []byte(output), 0600) + require.NoError(t, err) + + err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0600) + require.NoError(t, err) + + return nil } else { return fmt.Errorf("unexpected command: %v", actual.Args) } From b9e3c9872388d91c00b9e66219c7d8fd3abeddfd Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Wed, 3 Jul 2024 09:22:03 +0200 Subject: [PATCH 260/286] PythonMutator: support omitempty in PyDABs (#1513) ## Changes PyDABs output can omit empty sequences/mappings because we don't track them as optional. There is no semantic difference between empty and missing, which makes omitting correct. CLI detects that we falsely modify input resources by deleting all empty collections. To handle that, we extend `dyn.Override` to allow visitors to ignore certain deletes. If we see that an empty sequence or mapping is deleted, we revert such delete. 
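Condensed, the change boils down to a small predicate plus a sentinel error. The sketch below is simplified from the diff in this patch (`python_mutator.go` and `libs/dyn/merge/override.go`); imports and the phase-specific delete rules are elided:

```go
// Sketch: deletes of empty (or nil) collections coming out of PyDABs are
// treated as "omitempty" and undone, so the original value is kept.
func isOmitemptyDelete(left dyn.Value) bool {
	switch left.Kind() {
	case dyn.KindMap:
		return left.MustMap().Len() == 0
	case dyn.KindSequence:
		return len(left.MustSequence()) == 0
	case dyn.KindNil:
		// A map/sequence can be nil, e.g. YAML like `foo:`.
		return true
	default:
		return false
	}
}
```

When this predicate holds, `VisitDelete` returns `merge.ErrOverrideUndoDelete`, and `merge.Override` puts the deleted entry back instead of propagating the delete.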
## Tests Unit tests --------- Co-authored-by: Pieter Noordhuis --- .../config/mutator/python/python_mutator.go | 29 +++++++ .../mutator/python/python_mutator_test.go | 87 +++++++++++++++++++ libs/dyn/merge/override.go | 19 +++- libs/dyn/merge/override_test.go | 29 ++++++- 4 files changed, 161 insertions(+), 3 deletions(-) diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go index bef69d9c9..26b6c54fc 100644 --- a/bundle/config/mutator/python/python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -313,6 +313,10 @@ func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return merge.OverrideVisitor{ VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + if isOmitemptyDelete(left) { + return merge.ErrOverrideUndoDelete + } + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) }, VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { @@ -346,6 +350,10 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return merge.OverrideVisitor{ VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + if isOmitemptyDelete(left) { + return merge.ErrOverrideUndoDelete + } + if !valuePath.HasPrefix(jobsPath) { return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) } @@ -382,6 +390,27 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { } } +func isOmitemptyDelete(left dyn.Value) bool { + // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // there is no semantic difference between empty and missing, so we keep them as they were before + // PyDABs deleted them. + + switch left.Kind() { + case dyn.KindMap: + return left.MustMap().Len() == 0 + + case dyn.KindSequence: + return len(left.MustSequence()) == 0 + + case dyn.KindNil: + // map/sequence can be nil, for instance, bad YAML like: `foo:` + return true + + default: + return false + } +} + // interpreterPath returns platform-specific path to Python interpreter in the virtual environment. func interpreterPath(venvPath string) string { if runtime.GOOS == "windows" { diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index 24e5ad60f..64a2a1a65 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -10,6 +10,8 @@ import ( "runtime" "testing" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/bundle/env" "github.com/stretchr/testify/require" @@ -419,6 +421,91 @@ func TestCreateOverrideVisitor(t *testing.T) { } } +type overrideVisitorOmitemptyTestCase struct { + name string + path dyn.Path + left dyn.Value + phases []phase + expectedErr error +} + +func TestCreateOverrideVisitor_omitempty(t *testing.T) { + // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // there is no semantic difference between empty and missing, so we keep them as they were before + // PyDABs deleted them. 
+ + allPhases := []phase{PythonMutatorPhaseLoad, PythonMutatorPhaseInit} + location := dyn.Location{ + File: "databricks.yml", + Line: 10, + Column: 20, + } + + testCases := []overrideVisitorOmitemptyTestCase{ + { + // this is not happening, but adding for completeness + name: "undo delete of empty variables", + path: dyn.MustPathFromString("variables"), + left: dyn.NewValue([]dyn.Value{}, location), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "undo delete of empty job clusters", + path: dyn.MustPathFromString("resources.jobs.job0.job_clusters"), + left: dyn.NewValue([]dyn.Value{}, location), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "allow delete of non-empty job clusters", + path: dyn.MustPathFromString("resources.jobs.job0.job_clusters"), + left: dyn.NewValue([]dyn.Value{dyn.NewValue("abc", location)}, location), + expectedErr: nil, + // deletions aren't allowed in 'load' phase + phases: []phase{PythonMutatorPhaseInit}, + }, + { + name: "undo delete of empty tags", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NewValue(map[string]dyn.Value{}, location), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "allow delete of non-empty tags", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NewValue( + map[string]dyn.Value{"dev": dyn.NewValue("true", location)}, + location, + ), + expectedErr: nil, + // deletions aren't allowed in 'load' phase + phases: []phase{PythonMutatorPhaseInit}, + }, + { + name: "undo delete of nil", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NilValue.WithLocation(location), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + } + + for _, tc := range testCases { + for _, phase := range tc.phases { + t.Run(tc.name+"-"+string(phase), func(t *testing.T) { + visitor, err := createOverrideVisitor(context.Background(), phase) + require.NoError(t, err) + + err = visitor.VisitDelete(tc.path, tc.left) + + assert.Equal(t, tc.expectedErr, err) + }) + } + } +} + func TestLoadDiagnosticsFile_nonExistent(t *testing.T) { // this is an important behaviour, see loadDiagnosticsFile docstring _, err := loadDiagnosticsFile("non_existent_file.json") diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go index 81bbaa4d5..823fb1933 100644 --- a/libs/dyn/merge/override.go +++ b/libs/dyn/merge/override.go @@ -1,6 +1,7 @@ package merge import ( + "errors" "fmt" "github.com/databricks/cli/libs/dyn" @@ -13,6 +14,9 @@ import ( // For instance, it can disallow changes outside the specific path(s), or update // the location of the effective value. // +// Values returned by 'VisitInsert' and 'VisitUpdate' are used as the final value +// of the node. 'VisitDelete' can return ErrOverrideUndoDelete to undo delete. +// // 'VisitDelete' is called when a value is removed from mapping or sequence // 'VisitInsert' is called when a new value is added to mapping or sequence // 'VisitUpdate' is called when a leaf value is updated @@ -22,6 +26,8 @@ type OverrideVisitor struct { VisitUpdate func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) } +var ErrOverrideUndoDelete = errors.New("undo delete operation") + // Override overrides value 'leftRoot' with 'rightRoot', keeping 'location' if values // haven't changed. Preserving 'location' is important to preserve the original source of the value // for error reporting. 
@@ -111,7 +117,13 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy err := visitor.VisitDelete(path, leftPair.Value) - if err != nil { + // if 'delete' was undone, add it back + if errors.Is(err, ErrOverrideUndoDelete) { + err := out.Set(leftPair.Key, leftPair.Value) + if err != nil { + return dyn.NewMapping(), err + } + } else if err != nil { return dyn.NewMapping(), err } } @@ -186,7 +198,10 @@ func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, vi path := basePath.Append(dyn.Index(i)) err := visitor.VisitDelete(path, left[i]) - if err != nil { + // if 'delete' was undone, add it back + if errors.Is(err, ErrOverrideUndoDelete) { + values = append(values, left[i]) + } else if err != nil { return nil, err } } diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index d8fd4e178..d9ca97486 100644 --- a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/databricks/cli/libs/dyn" assert "github.com/databricks/cli/libs/dyn/dynassert" ) @@ -393,6 +395,24 @@ func TestOverride_Primitive(t *testing.T) { assert.Equal(t, expected, actual) } }) + + if len(tc.state.removed) > 0 { + t.Run(tc.name+" - visitor can undo delete", func(t *testing.T) { + s, visitor := createVisitor(visitorOpts{deleteError: ErrOverrideUndoDelete}) + out, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) + require.NoError(t, err) + + for _, removed := range s.removed { + expected, err := dyn.GetByPath(tc.left, dyn.MustPathFromString(removed)) + require.NoError(t, err) + + actual, err := dyn.GetByPath(out, dyn.MustPathFromString(removed)) + + assert.NoError(t, err) + assert.Equal(t, expected, actual) + } + }) + } } } } @@ -449,6 +469,7 @@ type visitorState struct { type visitorOpts struct { error error + deleteError error returnValue *dyn.Value } @@ -470,7 +491,13 @@ func createVisitor(opts visitorOpts) (*visitorState, OverrideVisitor) { VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { s.removed = append(s.removed, valuePath.String()) - return opts.error + if opts.error != nil { + return opts.error + } else if opts.deleteError != nil { + return opts.deleteError + } else { + return nil + } }, VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { s.added = append(s.added, valuePath.String()) From 4787edba3635b1efe4b8c03cae24b3406edd2daa Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Wed, 3 Jul 2024 10:33:23 +0200 Subject: [PATCH 261/286] PythonMutator: allow insert 'resources' and 'resources.jobs' (#1555) ## Changes Allow insert 'resources' and 'resources.jobs' because they can be absent in incoming bundle. ## Tests Unit tests --- .../config/mutator/python/python_mutator.go | 12 ++++++++++ .../mutator/python/python_mutator_test.go | 24 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go index 26b6c54fc..f9febe5b5 100644 --- a/bundle/config/mutator/python/python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -309,6 +309,7 @@ func createOverrideVisitor(ctx context.Context, phase phase) (merge.OverrideVisi // During load, it's only possible to create new resources, and not modify or // delete existing ones. 
func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { + resourcesPath := dyn.NewPath(dyn.Key("resources")) jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) return merge.OverrideVisitor{ @@ -320,6 +321,11 @@ func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) }, VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + // insert 'resources' or 'resources.jobs' if it didn't exist before + if valuePath.Equal(resourcesPath) || valuePath.Equal(jobsPath) { + return right, nil + } + if !valuePath.HasPrefix(jobsPath) { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) } @@ -346,6 +352,7 @@ func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { // During the init phase it's possible to create new resources, modify existing // resources, but not delete existing resources. func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { + resourcesPath := dyn.NewPath(dyn.Key("resources")) jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) return merge.OverrideVisitor{ @@ -370,6 +377,11 @@ func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { return nil }, VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + // insert 'resources' or 'resources.jobs' if it didn't exist before + if valuePath.Equal(resourcesPath) || valuePath.Equal(jobsPath) { + return right, nil + } + if !valuePath.HasPrefix(jobsPath) { return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) } diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index 64a2a1a65..9a0ed8c3a 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -325,6 +325,18 @@ func TestCreateOverrideVisitor(t *testing.T) { deletePath: dyn.MustPathFromString("resources.jobs.job0"), deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), }, + { + name: "load: can insert 'resources'", + phase: PythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources"), + insertError: nil, + }, + { + name: "load: can insert 'resources.jobs'", + phase: PythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources.jobs"), + insertError: nil, + }, { name: "load: can insert a job", phase: PythonMutatorPhaseLoad, @@ -357,6 +369,18 @@ func TestCreateOverrideVisitor(t *testing.T) { deletePath: dyn.MustPathFromString("resources.jobs.job0"), deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), }, + { + name: "init: can insert 'resources'", + phase: PythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources"), + insertError: nil, + }, + { + name: "init: can insert 'resources.jobs'", + phase: PythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources.jobs"), + insertError: nil, + }, { name: "init: can insert a job", phase: PythonMutatorPhaseInit, From b3c044c461be0067ff355973a65d1c1a0e6b5db0 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Jul 2024 12:13:22 +0200 Subject: [PATCH 262/286] Use `vfs.Path` for filesystem interaction (#1554) ## Changes Note: this doesn't cover _all_ filesystem interaction. 
To intercept calls where read or stat files to determine their type, we need a layer between our code and the `os` package calls that interact with the local file system. Interception is necessary to accommodate differences between a regular local file system and the FUSE-mounted Workspace File System when running the CLI on DBR. This change makes use of #1452 in the bundle struct. It uses #1525 to access the bundle variable in path rewriting. ## Tests * Unit tests pass. * Integration tests pass. --- bundle/bundle.go | 12 ++++-- bundle/bundle_read_only.go | 5 +++ bundle/config/mutator/load_git_details.go | 3 +- bundle/config/mutator/translate_paths.go | 7 ++-- bundle/config/mutator/translate_paths_test.go | 40 +++++++++++++------ .../config/validate/validate_sync_patterns.go | 3 +- bundle/deploy/files/sync.go | 3 +- bundle/deploy/state.go | 10 ++--- bundle/deploy/state_pull.go | 2 +- bundle/deploy/state_pull_test.go | 5 ++- bundle/deploy/state_test.go | 5 ++- cmd/sync/sync_test.go | 4 +- 12 files changed, 61 insertions(+), 38 deletions(-) diff --git a/bundle/bundle.go b/bundle/bundle.go index 482614b9a..032d98abc 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -17,7 +17,6 @@ import ( "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/bundle/metadata" "github.com/databricks/cli/libs/fileset" - "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" @@ -36,6 +35,10 @@ type Bundle struct { // It is set when we instantiate a new bundle instance. RootPath string + // BundleRoot is a virtual filesystem path to the root of the bundle. + // Exclusively use this field for filesystem operations. + BundleRoot vfs.Path + Config config.Root // Metadata about the bundle deployment. 
This is the interface Databricks services @@ -73,7 +76,8 @@ type Bundle struct { func Load(ctx context.Context, path string) (*Bundle, error) { b := &Bundle{ - RootPath: filepath.Clean(path), + RootPath: filepath.Clean(path), + BundleRoot: vfs.MustNew(path), } configFile, err := config.FileNames.FindInPath(path) if err != nil { @@ -208,12 +212,12 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { } func (b *Bundle) GitRepository() (*git.Repository, error) { - rootPath, err := folders.FindDirWithLeaf(b.RootPath, ".git") + _, err := vfs.FindLeafInTree(b.BundleRoot, ".git") if err != nil { return nil, fmt.Errorf("unable to locate repository root: %w", err) } - return git.NewRepository(vfs.MustNew(rootPath)) + return git.NewRepository(b.BundleRoot) } // AuthEnv returns a map with environment variables and their values diff --git a/bundle/bundle_read_only.go b/bundle/bundle_read_only.go index e4a4f9936..59084f2ac 100644 --- a/bundle/bundle_read_only.go +++ b/bundle/bundle_read_only.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" ) @@ -23,6 +24,10 @@ func (r ReadOnlyBundle) RootPath() string { return r.b.RootPath } +func (r ReadOnlyBundle) BundleRoot() vfs.Path { + return r.b.BundleRoot +} + func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient { return r.b.WorkspaceClient() } diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index d8b76f39e..9b1c963c9 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/log" - "github.com/databricks/cli/libs/vfs" ) type loadGitDetails struct{} @@ -23,7 +22,7 @@ func (m *loadGitDetails) Name() string { func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Load relevant git repository - repo, err := git.NewRepository(vfs.MustNew(b.RootPath)) + repo, err := git.NewRepository(b.BundleRoot) if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 4224eafd4..a01d3d6a7 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -6,7 +6,6 @@ import ( "fmt" "io/fs" "net/url" - "os" "path" "path/filepath" "strings" @@ -119,7 +118,7 @@ func (t *translateContext) rewritePath( } func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - nb, _, err := notebook.Detect(localFullPath) + nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath)) if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("notebook %s not found", literal) } @@ -135,7 +134,7 @@ func (t *translateContext) translateNotebookPath(literal, localFullPath, localRe } func (t *translateContext) translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - nb, _, err := notebook.Detect(localFullPath) + nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath)) if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("file %s not found", literal) } @@ -149,7 +148,7 @@ func (t *translateContext) translateFilePath(literal, localFullPath, localRelPat } func (t *translateContext) translateDirectoryPath(literal, 
localFullPath, localRelPath, remotePath string) (string, error) { - info, err := os.Stat(localFullPath) + info, err := t.b.BundleRoot.Stat(filepath.ToSlash(localRelPath)) if err != nil { return "", err } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 29afb9972..8476ee38a 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -37,7 +38,8 @@ func touchEmptyFile(t *testing.T, path string) { func TestTranslatePathsSkippedWithGitSource(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -107,7 +109,8 @@ func TestTranslatePaths(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -274,7 +277,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -368,7 +372,8 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -401,7 +406,8 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -431,7 +437,8 @@ func TestJobFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -461,7 +468,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ @@ -491,7 +499,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ @@ -522,7 +531,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -556,7 +566,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: 
dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -590,7 +601,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -624,7 +636,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -659,7 +672,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "env2.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go index 832efede9..a04c10776 100644 --- a/bundle/config/validate/validate_sync_patterns.go +++ b/bundle/config/validate/validate_sync_patterns.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/fileset" - "github.com/databricks/cli/libs/vfs" "golang.org/x/sync/errgroup" ) @@ -51,7 +50,7 @@ func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (di index := i p := pattern errs.Go(func() error { - fs, err := fileset.NewGlobSet(vfs.MustNew(rb.RootPath()), []string{p}) + fs, err := fileset.NewGlobSet(rb.BundleRoot(), []string{p}) if err != nil { return err } diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index 8d6efdae3..a308668d3 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -6,7 +6,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/sync" - "github.com/databricks/cli/libs/vfs" ) func GetSync(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.Sync, error) { @@ -29,7 +28,7 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp } opts := &sync.SyncOptions{ - LocalPath: vfs.MustNew(rb.RootPath()), + LocalPath: rb.BundleRoot(), RemotePath: rb.Config().Workspace.FilePath, Include: includes, Exclude: rb.Config().Sync.Exclude, diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go index ccff64fe7..97048811b 100644 --- a/bundle/deploy/state.go +++ b/bundle/deploy/state.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/fs" - "os" "path/filepath" "time" @@ -59,8 +58,8 @@ type entry struct { info fs.FileInfo } -func newEntry(path string) *entry { - info, err := os.Stat(path) +func newEntry(root vfs.Path, path string) *entry { + info, err := root.Stat(path) if err != nil { return &entry{path, nil} } @@ -111,11 +110,10 @@ func FromSlice(files []fileset.File) (Filelist, error) { return f, nil } -func (f Filelist) ToSlice(basePath string) []fileset.File { +func (f Filelist) ToSlice(root vfs.Path) []fileset.File { var files []fileset.File - root := vfs.MustNew(basePath) for _, file := range f { - entry := newEntry(filepath.Join(basePath, file.LocalPath)) + entry := newEntry(root, filepath.ToSlash(file.LocalPath)) // Snapshots created with versions <= v0.220.0 use platform-specific // paths (i.e. with backslashes). 
Files returned by [libs/fileset] always diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 57b38ec6c..24ed9d360 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -85,7 +85,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic } log.Infof(ctx, "Creating new snapshot") - snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.RootPath), opts) + snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.BundleRoot), opts) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index 409895a25..38f0b4021 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -17,6 +17,7 @@ import ( "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/sync" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -59,8 +60,10 @@ func testStatePull(t *testing.T, opts statePullOpts) { return f, nil }} + tmpDir := t.TempDir() b := &bundle.Bundle{ - RootPath: t.TempDir(), + RootPath: tmpDir, + BundleRoot: vfs.MustNew(tmpDir), Config: config.Root{ Bundle: config.Bundle{ Target: "default", diff --git a/bundle/deploy/state_test.go b/bundle/deploy/state_test.go index efa051ab6..5e1e54230 100644 --- a/bundle/deploy/state_test.go +++ b/bundle/deploy/state_test.go @@ -32,7 +32,8 @@ func TestFromSlice(t *testing.T) { func TestToSlice(t *testing.T) { tmpDir := t.TempDir() - fileset := fileset.New(vfs.MustNew(tmpDir)) + root := vfs.MustNew(tmpDir) + fileset := fileset.New(root) testutil.Touch(t, tmpDir, "test1.py") testutil.Touch(t, tmpDir, "test2.py") testutil.Touch(t, tmpDir, "test3.py") @@ -44,7 +45,7 @@ func TestToSlice(t *testing.T) { require.NoError(t, err) require.Len(t, f, 3) - s := f.ToSlice(tmpDir) + s := f.ToSlice(root) require.Len(t, s, 3) for _, file := range s { diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index b741e7b16..564aeae56 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -16,7 +17,8 @@ import ( func TestSyncOptionsFromBundle(t *testing.T) { tempDir := t.TempDir() b := &bundle.Bundle{ - RootPath: tempDir, + RootPath: tempDir, + BundleRoot: vfs.MustNew(tempDir), Config: config.Root{ Bundle: config.Bundle{ Target: "default", From f14dded946f2093e9a33431ab0cce638642b229e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Jul 2024 13:55:42 +0200 Subject: [PATCH 263/286] Replace `vfs.Path` with extension-aware filer when running on DBR (#1556) ## Changes The FUSE mount of the workspace file system on DBR doesn't include file extensions for notebooks. When these notebooks are checked into a repository, they do have an extension. PR #1457 added a filer type that is aware of this disparity and makes these notebooks show up as if they do have these extensions. This change swaps out the native `vfs.Path` with one that uses this filer when running on DBR. Follow up: consolidate between interfaces exported by `filer.Filer` and `vfs.Path`. 
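As a rough usage sketch (separate from this change, which wires the swap up via the `ConfigureWSFS` mutator in the diff below), the extension-aware path can be constructed and queried as follows. The workspace root and notebook name are placeholders, and error handling is simplified:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/cli/libs/vfs"
	"github.com/databricks/databricks-sdk-go"
)

func main() {
	ctx := context.Background()

	// Placeholder workspace client and bundle root; substitute real values.
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}
	root := "/Workspace/Users/someone@example.com/my_bundle"

	// Wrap the bundle root in a vfs.Path backed by the extension-aware filer.
	p, err := vfs.NewFilerPath(ctx, root, func(path string) (filer.Filer, error) {
		return filer.NewWorkspaceFilesExtensionsClient(w, path)
	})
	if err != nil {
		log.Fatal(err)
	}

	// A notebook that WSFS stores as "my_notebook" (no extension) is reported
	// here with its extension, matching how it is checked into the repository.
	info, err := p.Stat("my_notebook.py")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.Name(), info.Size())
}
```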
## Tests * Unit tests pass * (Manually ran a snapshot build on DBR against a bundle with notebooks) --------- Co-authored-by: Andrew Nester --- bundle/config/mutator/configure_wsfs.go | 50 ++++++++++++++++ bundle/phases/initialize.go | 4 ++ libs/vfs/filer.go | 66 +++++++++++++++++++++ libs/vfs/filer_test.go | 79 +++++++++++++++++++++++++ 4 files changed, 199 insertions(+) create mode 100644 bundle/config/mutator/configure_wsfs.go create mode 100644 libs/vfs/filer.go create mode 100644 libs/vfs/filer_test.go diff --git a/bundle/config/mutator/configure_wsfs.go b/bundle/config/mutator/configure_wsfs.go new file mode 100644 index 000000000..17af4828f --- /dev/null +++ b/bundle/config/mutator/configure_wsfs.go @@ -0,0 +1,50 @@ +package mutator + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/vfs" +) + +const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION" + +type configureWSFS struct{} + +func ConfigureWSFS() bundle.Mutator { + return &configureWSFS{} +} + +func (m *configureWSFS) Name() string { + return "ConfigureWSFS" +} + +func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + root := b.BundleRoot.Native() + + // The bundle root must be located in /Workspace/ + if !strings.HasPrefix(root, "/Workspace/") { + return nil + } + + // The executable must be running on DBR. + if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok { + return nil + } + + // If so, swap out vfs.Path instance of the sync root with one that + // makes all Workspace File System interactions extension aware. + p, err := vfs.NewFilerPath(ctx, root, func(path string) (filer.Filer, error) { + return filer.NewWorkspaceFilesExtensionsClient(b.WorkspaceClient(), path) + }) + if err != nil { + return diag.FromErr(err) + } + + b.BundleRoot = p + return nil +} diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 79fca9df6..a32de2c56 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -46,6 +46,10 @@ func Initialize() bundle.Mutator { mutator.ProcessTargetMode(), mutator.DefaultQueueing(), mutator.ExpandPipelineGlobPaths(), + + // Configure use of WSFS for reads if the CLI is running on Databricks. 
+ mutator.ConfigureWSFS(), + mutator.TranslatePaths(), python.WrapperWarning(), permissions.ApplyBundlePermissions(), diff --git a/libs/vfs/filer.go b/libs/vfs/filer.go new file mode 100644 index 000000000..54f672e06 --- /dev/null +++ b/libs/vfs/filer.go @@ -0,0 +1,66 @@ +package vfs + +import ( + "context" + "io/fs" + "path/filepath" + + "github.com/databricks/cli/libs/filer" +) + +type filerPath struct { + ctx context.Context + path string + fs FS + + construct func(path string) (filer.Filer, error) +} + +func NewFilerPath(ctx context.Context, path string, construct func(path string) (filer.Filer, error)) (Path, error) { + f, err := construct(path) + if err != nil { + return nil, err + } + + return &filerPath{ + ctx: ctx, + path: path, + fs: filer.NewFS(ctx, f).(FS), + + construct: construct, + }, nil +} + +func (f filerPath) Open(name string) (fs.File, error) { + return f.fs.Open(name) +} + +func (f filerPath) Stat(name string) (fs.FileInfo, error) { + return f.fs.Stat(name) +} + +func (f filerPath) ReadDir(name string) ([]fs.DirEntry, error) { + return f.fs.ReadDir(name) +} + +func (f filerPath) ReadFile(name string) ([]byte, error) { + return f.fs.ReadFile(name) +} + +func (f filerPath) Parent() Path { + if f.path == "/" { + return nil + } + + dir := filepath.Dir(f.path) + nf, err := NewFilerPath(f.ctx, dir, f.construct) + if err != nil { + panic(err) + } + + return nf +} + +func (f filerPath) Native() string { + return f.path +} diff --git a/libs/vfs/filer_test.go b/libs/vfs/filer_test.go new file mode 100644 index 000000000..ee1397521 --- /dev/null +++ b/libs/vfs/filer_test.go @@ -0,0 +1,79 @@ +package vfs + +import ( + "context" + "errors" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFilerPath(t *testing.T) { + ctx := context.Background() + wd, err := os.Getwd() + require.NoError(t, err) + + // Create a new filer-backed path. + p, err := NewFilerPath(ctx, filepath.FromSlash(wd), filer.NewLocalClient) + require.NoError(t, err) + + // Open self. + f, err := p.Open("filer_test.go") + require.NoError(t, err) + defer f.Close() + + // Run stat on self. + s, err := f.Stat() + require.NoError(t, err) + assert.Equal(t, "filer_test.go", s.Name()) + assert.GreaterOrEqual(t, int(s.Size()), 128) + + // Read some bytes. + buf := make([]byte, 1024) + _, err = f.Read(buf) + require.NoError(t, err) + assert.True(t, strings.HasPrefix(string(buf), "package vfs")) + + // Open non-existent file. + _, err = p.Open("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Stat self. + s, err = p.Stat("filer_test.go") + require.NoError(t, err) + assert.Equal(t, "filer_test.go", s.Name()) + assert.GreaterOrEqual(t, int(s.Size()), 128) + + // Stat non-existent file. + _, err = p.Stat("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // ReadDir self. + entries, err := p.ReadDir(".") + require.NoError(t, err) + assert.GreaterOrEqual(t, len(entries), 1) + + // ReadDir non-existent directory. + _, err = p.ReadDir("doesntexist") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // ReadFile self. + buf, err = p.ReadFile("filer_test.go") + require.NoError(t, err) + assert.True(t, strings.HasPrefix(string(buf), "package vfs")) + + // ReadFile non-existent file. + _, err = p.ReadFile("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Parent self. 
+ pp := p.Parent() + require.NotNil(t, pp) + assert.Equal(t, filepath.Join(pp.Native(), "vfs"), p.Native()) +} From 2a73d7788b45d03abeac2f492507dfc6e24fe0bb Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Jul 2024 14:24:42 +0200 Subject: [PATCH 264/286] Release v0.223.0 (#1557) Bundles: As of this release you can interact with bundles when running the CLI on DBR (e.g. via the Web Terminal). * Fix non-default project names not working in dbt-sql template ([#1500](https://github.com/databricks/cli/pull/1500)). * Improve `bundle validate` output ([#1532](https://github.com/databricks/cli/pull/1532)). * Fixed resolving variable references inside slice variable ([#1550](https://github.com/databricks/cli/pull/1550)). * Fixed bundle not loading when empty variable is defined ([#1552](https://github.com/databricks/cli/pull/1552)). * Use `vfs.Path` for filesystem interaction ([#1554](https://github.com/databricks/cli/pull/1554)). * Replace `vfs.Path` with extension-aware filer when running on DBR ([#1556](https://github.com/databricks/cli/pull/1556)). Internal: * merge.Override: Fix handling of dyn.NilValue ([#1530](https://github.com/databricks/cli/pull/1530)). * Compare `.Kind()` instead of direct equality checks on a `dyn.Value` ([#1520](https://github.com/databricks/cli/pull/1520)). * PythonMutator: register product in user agent extra ([#1533](https://github.com/databricks/cli/pull/1533)). * Ignore `dyn.NilValue` when traversing value from `dyn.Map` ([#1547](https://github.com/databricks/cli/pull/1547)). * Add extra tests for the sync block ([#1548](https://github.com/databricks/cli/pull/1548)). * PythonMutator: add diagnostics ([#1531](https://github.com/databricks/cli/pull/1531)). * PythonMutator: support omitempty in PyDABs ([#1513](https://github.com/databricks/cli/pull/1513)). * PythonMutator: allow insert 'resources' and 'resources.jobs' ([#1555](https://github.com/databricks/cli/pull/1555)). --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5fcc45b3..dc2775f0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Version changelog +## 0.223.0 + +Bundles: + +As of this release you can interact with bundles when running the CLI on DBR (e.g. via the Web Terminal). + + * Fix non-default project names not working in dbt-sql template ([#1500](https://github.com/databricks/cli/pull/1500)). + * Improve `bundle validate` output ([#1532](https://github.com/databricks/cli/pull/1532)). + * Fixed resolving variable references inside slice variable ([#1550](https://github.com/databricks/cli/pull/1550)). + * Fixed bundle not loading when empty variable is defined ([#1552](https://github.com/databricks/cli/pull/1552)). + * Use `vfs.Path` for filesystem interaction ([#1554](https://github.com/databricks/cli/pull/1554)). + * Replace `vfs.Path` with extension-aware filer when running on DBR ([#1556](https://github.com/databricks/cli/pull/1556)). + +Internal: + * merge.Override: Fix handling of dyn.NilValue ([#1530](https://github.com/databricks/cli/pull/1530)). + * Compare `.Kind()` instead of direct equality checks on a `dyn.Value` ([#1520](https://github.com/databricks/cli/pull/1520)). + * PythonMutator: register product in user agent extra ([#1533](https://github.com/databricks/cli/pull/1533)). + * Ignore `dyn.NilValue` when traversing value from `dyn.Map` ([#1547](https://github.com/databricks/cli/pull/1547)). + * Add extra tests for the sync block ([#1548](https://github.com/databricks/cli/pull/1548)). 
+ * PythonMutator: add diagnostics ([#1531](https://github.com/databricks/cli/pull/1531)). + * PythonMutator: support omitempty in PyDABs ([#1513](https://github.com/databricks/cli/pull/1513)). + * PythonMutator: allow insert 'resources' and 'resources.jobs' ([#1555](https://github.com/databricks/cli/pull/1555)). + ## 0.222.0 CLI: From 7d2aa357388ee4cd64f67646f6b20f8ee3152e01 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Jul 2024 18:23:19 +0200 Subject: [PATCH 265/286] Fix logic error in #1532 (#1564) ## Changes This snuck into #1532 right before merging. The result is that error output is no longer logged. This includes actual execution errors as well as help output if arguments or flags are incorrectly specified. We don't have test coverage for the `root.Execute` function. This is to be fixed later. ## Tests Manually confirmed we observe error output again. --- cmd/root/root.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/root/root.go b/cmd/root/root.go index 91e91d368..61baa4da0 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -98,7 +98,7 @@ func Execute(cmd *cobra.Command) { // Run the command cmd, err := cmd.ExecuteContextC(ctx) - if err != nil && errors.Is(err, ErrAlreadyPrinted) { + if err != nil && !errors.Is(err, ErrAlreadyPrinted) { // If cmdio logger initialization succeeds, then this function logs with the // initialized cmdio logger, otherwise with the default cmdio logger cmdio.LogError(cmd.Context(), err) From bf275428b6150eab802c13eb57fe9f5e4a248891 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 3 Jul 2024 18:41:55 +0200 Subject: [PATCH 266/286] Release v0.223.1 (#1565) This bugfix release fixes missing error messages in v0.223.0. CLI: * Fix logic error in [#1532](https://github.com/databricks/cli/pull/1532) ([#1564](https://github.com/databricks/cli/pull/1564)). --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc2775f0a..16d81f822 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Version changelog +## 0.223.1 + +This bugfix release fixes missing error messages in v0.223.0. + +CLI: + * Fix logic error in [#1532](https://github.com/databricks/cli/pull/1532) ([#1564](https://github.com/databricks/cli/pull/1564)). + + ## 0.223.0 Bundles: From 324fa2e18b0227de3f36d18954e7df30a55806aa Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 4 Jul 2024 08:54:30 +0200 Subject: [PATCH 267/286] Update actions/upload-artifact to v4 (#1559) ## Changes This addresses a deprecation warning in our GHA output. 
Full release notes of v4 at https://github.com/actions/upload-artifact/releases/tag/v4.0.0 --- .github/workflows/release-snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index bd89417e2..9807059c7 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -33,21 +33,21 @@ jobs: args: release --snapshot --skip docker - name: Upload macOS binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_darwin_snapshot path: | dist/*_darwin_*/ - name: Upload Linux binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_linux_snapshot path: | dist/*_linux_*/ - name: Upload Windows binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_windows_snapshot path: | From 80136dea5fb23f06595f4934f3dd20605d6fb7a0 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 4 Jul 2024 08:54:41 +0200 Subject: [PATCH 268/286] Use Go 1.22 to build and test (#1562) ## Changes This has been released for a while. Blog post: https://go.dev/blog/go1.22. ## Tests None besides the unit tests. --- .github/workflows/push.yml | 6 +++--- .github/workflows/release-snapshot.yml | 2 +- .github/workflows/release.yml | 2 +- go.mod | 2 +- libs/process/background_test.go | 5 ++--- 5 files changed, 8 insertions(+), 9 deletions(-) diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 244bdeee5..08edfb9da 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -33,7 +33,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x - name: Setup Python uses: actions/setup-python@v5 @@ -68,7 +68,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x # No need to download cached dependencies when running gofmt. cache: false @@ -100,7 +100,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x # Github repo: https://github.com/ajv-validator/ajv-cli - name: Install ajv-cli diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index 9807059c7..faa5df022 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -21,7 +21,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x - name: Hide snapshot tag to outsmart GoReleaser run: git tag -d snapshot || true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bde5b377b..cf356ca64 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -22,7 +22,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x # Log into the GitHub Container Registry. The goreleaser action will create # the docker images and push them to the GitHub Container Registry. 
diff --git a/go.mod b/go.mod index 2dfbf46cf..385a93b09 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/databricks/cli -go 1.21 +go 1.22 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT diff --git a/libs/process/background_test.go b/libs/process/background_test.go index 2ee6727a0..2e47e814b 100644 --- a/libs/process/background_test.go +++ b/libs/process/background_test.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "fmt" - "os" "os/exec" "strings" "testing" @@ -26,8 +25,8 @@ func splitLines(b []byte) (lines []string) { func TestBackgroundUnwrapsNotFound(t *testing.T) { ctx := context.Background() - _, err := Background(ctx, []string{"/bin/meeecho", "1"}) - assert.ErrorIs(t, err, os.ErrNotExist) + _, err := Background(ctx, []string{"meeecho", "1"}) + assert.ErrorIs(t, err, exec.ErrNotFound) } func TestBackground(t *testing.T) { From 8c3be300936c15f6a6ca736e23e4a21834f8f316 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 4 Jul 2024 13:39:55 +0200 Subject: [PATCH 269/286] Use different Go cache key for goreleaser jobs (#1558) ## Changes The goreleaser jobs perform a cross-platform build of the main binary without test files. It should use a different cache than the jobs that run tests for a single platform. This change also updates the `release-snapshot` job to use the latest goreleaser action, as was done in #1477. ## Tests Ran `release-snapshot` job from this PR. --- .github/workflows/release-snapshot.yml | 12 ++++++++++-- .github/workflows/release.yml | 7 +++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index faa5df022..defd1c535 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -23,13 +23,21 @@ jobs: with: go-version: 1.22.x + # The default cache key for this action considers only the `go.sum` file. + # We include .goreleaser.yaml here to differentiate from the cache used by the push action + # that runs unit tests. This job produces and uses a different cache. + cache-dependency-path: | + go.sum + .goreleaser.yaml + - name: Hide snapshot tag to outsmart GoReleaser run: git tag -d snapshot || true - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v4 + id: releaser + uses: goreleaser/goreleaser-action@v6 with: - version: latest + version: ~> v2 args: release --snapshot --skip docker - name: Upload macOS binaries diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index cf356ca64..531fb39bf 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -24,6 +24,13 @@ jobs: with: go-version: 1.22.x + # The default cache key for this action considers only the `go.sum` file. + # We include .goreleaser.yaml here to differentiate from the cache used by the push action + # that runs unit tests. This job produces and uses a different cache. + cache-dependency-path: | + go.sum + .goreleaser.yaml + # Log into the GitHub Container Registry. The goreleaser action will create # the docker images and push them to the GitHub Container Registry. - uses: "docker/login-action@v3" From 040b374430fd4a3519f213673500f0ef8b7d444f Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 4 Jul 2024 13:57:29 +0200 Subject: [PATCH 270/286] Override complex variables with target overrides instead of merging (#1567) ## Changes At the moment we merge values of complex variables while more expected behaviour is overriding the value with the target one. 
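To make the intended semantics concrete, here is a minimal package-internal sketch in the spirit of the unit test added below; the variable name `conf`, the target name `development`, and the concrete keys are made up. Only the target's `default` (and `lookup`) replace the base values, while other fields such as the description keep their base definition:

```go
package config

import (
	"testing"

	"github.com/databricks/cli/bundle/config/variable"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// A complex variable overridden in a target is replaced wholesale, not merged.
func TestComplexVariableTargetOverrideReplaces(t *testing.T) {
	root := &Root{
		Variables: map[string]*variable.Variable{
			"conf": {
				Type:        variable.VariableTypeComplex,
				Description: "cluster conf",
				Default: map[string]interface{}{
					"spark.speculation": "true",
					"spark.random":      "true",
				},
			},
		},
		Targets: map[string]*Target{
			"development": {
				Variables: map[string]*variable.Variable{
					"conf": {
						Default: map[string]interface{}{
							"spark.speculation": "false",
						},
					},
				},
			},
		},
	}

	root.initializeDynamicValue()
	require.NoError(t, root.MergeTargetOverrides("development"))

	// Keys present only in the base default ("spark.random") are gone: the
	// target's value wins outright instead of being deep-merged into the base.
	assert.Equal(t, map[string]interface{}{
		"spark.speculation": "false",
	}, root.Variables["conf"].Default)
}
```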
## Tests Added unit test --- bundle/config/root.go | 26 ++++++++- bundle/config/root_test.go | 53 +++++++++++++++++++ bundle/tests/complex_variables_test.go | 8 +++ bundle/tests/variables/complex/databricks.yml | 2 + 4 files changed, 88 insertions(+), 1 deletion(-) diff --git a/bundle/config/root.go b/bundle/config/root.go index 60faba29c..2bbb78696 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -338,13 +338,36 @@ func (r *Root) MergeTargetOverrides(name string) error { "resources", "sync", "permissions", - "variables", } { if root, err = mergeField(root, target, f); err != nil { return err } } + // Merge `variables`. This field must be overwritten if set, not merged. + if v := target.Get("variables"); v.Kind() != dyn.KindInvalid { + _, err = dyn.Map(v, ".", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) { + varPath := dyn.MustPathFromString("variables").Append(p...) + + vDefault := variable.Get("default") + if vDefault.Kind() != dyn.KindInvalid { + defaultPath := varPath.Append(dyn.Key("default")) + root, err = dyn.SetByPath(root, defaultPath, vDefault) + } + + vLookup := variable.Get("lookup") + if vLookup.Kind() != dyn.KindInvalid { + lookupPath := varPath.Append(dyn.Key("lookup")) + root, err = dyn.SetByPath(root, lookupPath, vLookup) + } + + return root, err + })) + if err != nil { + return err + } + } + // Merge `run_as`. This field must be overwritten if set, not merged. if v := target.Get("run_as"); v.Kind() != dyn.KindInvalid { root, err = dyn.Set(root, "run_as", v) @@ -444,6 +467,7 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { if typeV.MustString() == "complex" { return dyn.NewValue(map[string]dyn.Value{ + "type": typeV, "default": variable, }, variable.Location()), nil } diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index 27cc3d22b..aed670d6c 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -132,3 +132,56 @@ func TestInitializeComplexVariablesViaFlagIsNotAllowed(t *testing.T) { err := root.InitializeVariables([]string{"foo=123"}) assert.ErrorContains(t, err, "setting variables of complex type via --var flag is not supported: foo") } + +func TestRootMergeTargetOverridesWithVariables(t *testing.T) { + root := &Root{ + Bundle: Bundle{}, + Variables: map[string]*variable.Variable{ + "foo": { + Default: "foo", + Description: "foo var", + }, + "foo2": { + Default: "foo2", + Description: "foo2 var", + }, + "complex": { + Type: variable.VariableTypeComplex, + Description: "complex var", + Default: map[string]interface{}{ + "key": "value", + }, + }, + }, + Targets: map[string]*Target{ + "development": { + Variables: map[string]*variable.Variable{ + "foo": { + Default: "bar", + Description: "wrong", + }, + "complex": { + Type: "wrong", + Description: "wrong", + Default: map[string]interface{}{ + "key1": "value1", + }, + }, + }, + }, + }, + } + root.initializeDynamicValue() + require.NoError(t, root.MergeTargetOverrides("development")) + assert.Equal(t, "bar", root.Variables["foo"].Default) + assert.Equal(t, "foo var", root.Variables["foo"].Description) + + assert.Equal(t, "foo2", root.Variables["foo2"].Default) + assert.Equal(t, "foo2 var", root.Variables["foo2"].Description) + + assert.Equal(t, map[string]interface{}{ + "key1": "value1", + }, root.Variables["complex"].Default) + assert.Equal(t, "complex var", root.Variables["complex"].Description) + +} diff --git a/bundle/tests/complex_variables_test.go b/bundle/tests/complex_variables_test.go index ffe80e418..1badea6df 100644 --- 
a/bundle/tests/complex_variables_test.go +++ b/bundle/tests/complex_variables_test.go @@ -25,8 +25,10 @@ func TestComplexVariables(t *testing.T) { require.Equal(t, "13.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, "some-policy-id", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) require.Equal(t, 2, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) + require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) require.Equal(t, 3, len(b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries)) require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ @@ -59,4 +61,10 @@ func TestComplexVariablesOverride(t *testing.T) { require.Equal(t, "Standard_DS3_v3", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) require.Equal(t, 4, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) require.Equal(t, "false", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) + + // Making sure the variable is overriden and not merged / extended + // These properties are set in the default target but not set in override target + // So they should be empty + require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) + require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) } diff --git a/bundle/tests/variables/complex/databricks.yml b/bundle/tests/variables/complex/databricks.yml index f7535ad4b..ca27f606d 100644 --- a/bundle/tests/variables/complex/databricks.yml +++ b/bundle/tests/variables/complex/databricks.yml @@ -23,9 +23,11 @@ variables: spark_version: "13.2.x-scala2.11" node_type_id: ${var.node_type} num_workers: 2 + policy_id: "some-policy-id" spark_conf: spark.speculation: true spark.databricks.delta.retentionDurationCheck.enabled: false + spark.random: true libraries: type: complex description: "A libraries definition" From 3d8446bbdbb1797a87879af11cb5eb7d4a041aa6 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 5 Jul 2024 12:58:28 +0200 Subject: [PATCH 271/286] Rewrite local path for libraries in foreach tasks (#1569) ## Changes Now local library path in `libraries` section of foreach each tasks are correctly replaced with remote path for this library when it's uploaded to Databricks ## Tests Added unit test --- bundle/artifacts/artifacts.go | 66 ++++++++++++++++++++---------- bundle/artifacts/artifacts_test.go | 16 ++++++++ 2 files changed, 60 insertions(+), 22 deletions(-) diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index 470c329a1..a5f41ae4b 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" @@ -135,36 +136,57 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u remotePath := path.Join(wsfsBase, f.RemotePath) for _, job := 
range b.Config.Resources.Jobs { - for i := range job.Tasks { - task := &job.Tasks[i] - for j := range task.Libraries { - lib := &task.Libraries[j] - if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { - lib.Whl = remotePath - } - if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { - lib.Jar = remotePath - } - } + rewriteArtifactPath(b, f, job, remotePath) + + } + } + + return nil +} + +func rewriteArtifactPath(b *bundle.Bundle, f *config.ArtifactFile, job *resources.Job, remotePath string) { + // Rewrite artifact path in job task libraries + for i := range job.Tasks { + task := &job.Tasks[i] + for j := range task.Libraries { + lib := &task.Libraries[j] + if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { + lib.Whl = remotePath } + if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { + lib.Jar = remotePath + } + } - for i := range job.Environments { - env := &job.Environments[i] - if env.Spec == nil { - continue + // Rewrite artifact path in job task libraries for ForEachTask + if task.ForEachTask != nil { + forEachTask := task.ForEachTask + for j := range forEachTask.Task.Libraries { + lib := &forEachTask.Task.Libraries[j] + if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { + lib.Whl = remotePath } - - for j := range env.Spec.Dependencies { - lib := env.Spec.Dependencies[j] - if isArtifactMatchLibrary(f, lib, b) { - env.Spec.Dependencies[j] = remotePath - } + if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { + lib.Jar = remotePath } } } } - return nil + // Rewrite artifact path in job environments + for i := range job.Environments { + env := &job.Environments[i] + if env.Spec == nil { + continue + } + + for j := range env.Spec.Dependencies { + lib := env.Spec.Dependencies[j] + if isArtifactMatchLibrary(f, lib, b) { + env.Spec.Dependencies[j] = remotePath + } + } + } } func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool { diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go index ca0e578bd..53c2798ed 100644 --- a/bundle/artifacts/artifacts_test.go +++ b/bundle/artifacts/artifacts_test.go @@ -52,6 +52,20 @@ func TestArtifactUpload(t *testing.T) { }, }, }, + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + }, }, Environments: []jobs.JobEnvironment{ { @@ -88,4 +102,6 @@ func TestArtifactUpload(t *testing.T) { require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) } From 869576e14422bdfeb6fcd745438ebda7aeaf8f6f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 5 Jul 2024 13:32:29 +0200 Subject: [PATCH 272/286] Move bespoke status call to main workspace files filer (#1570) ## Changes This consolidates the two separate status calls into one. 
The extension-aware filer now doesn't need the direct API client anymore and fully relies on the underlying filer. ## Tests * Unit tests. * Ran the filer integration tests manually. --- libs/filer/workspace_files_client.go | 51 +++++++++--- libs/filer/workspace_files_client_test.go | 39 +++++++++ .../workspace_files_extensions_client.go | 81 ++++--------------- 3 files changed, 97 insertions(+), 74 deletions(-) diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go index 09f11b161..d799c1f88 100644 --- a/libs/filer/workspace_files_client.go +++ b/libs/filer/workspace_files_client.go @@ -19,6 +19,7 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -38,7 +39,7 @@ func (entry wsfsDirEntry) Info() (fs.FileInfo, error) { func wsfsDirEntriesFromObjectInfos(objects []workspace.ObjectInfo) []fs.DirEntry { info := make([]fs.DirEntry, len(objects)) for i, v := range objects { - info[i] = wsfsDirEntry{wsfsFileInfo{oi: v}} + info[i] = wsfsDirEntry{wsfsFileInfo{ObjectInfo: v}} } // Sort by name for parity with os.ReadDir. @@ -48,19 +49,22 @@ func wsfsDirEntriesFromObjectInfos(objects []workspace.ObjectInfo) []fs.DirEntry // Type that implements fs.FileInfo for WSFS. type wsfsFileInfo struct { - oi workspace.ObjectInfo + workspace.ObjectInfo + + // The export format of a notebook. This is not exposed by the SDK. + ReposExportFormat workspace.ExportFormat `json:"repos_export_format,omitempty"` } func (info wsfsFileInfo) Name() string { - return path.Base(info.oi.Path) + return path.Base(info.ObjectInfo.Path) } func (info wsfsFileInfo) Size() int64 { - return info.oi.Size + return info.ObjectInfo.Size } func (info wsfsFileInfo) Mode() fs.FileMode { - switch info.oi.ObjectType { + switch info.ObjectInfo.ObjectType { case workspace.ObjectTypeDirectory, workspace.ObjectTypeRepo: return fs.ModeDir default: @@ -69,7 +73,7 @@ func (info wsfsFileInfo) Mode() fs.FileMode { } func (info wsfsFileInfo) ModTime() time.Time { - return time.UnixMilli(info.oi.ModifiedAt) + return time.UnixMilli(info.ObjectInfo.ModifiedAt) } func (info wsfsFileInfo) IsDir() bool { @@ -77,7 +81,21 @@ func (info wsfsFileInfo) IsDir() bool { } func (info wsfsFileInfo) Sys() any { - return info.oi + return info.ObjectInfo +} + +// UnmarshalJSON is a custom unmarshaller for the wsfsFileInfo struct. +// It must be defined for this type because otherwise the implementation +// of the embedded ObjectInfo type will be used. +func (info *wsfsFileInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, info) +} + +// MarshalJSON is a custom marshaller for the wsfsFileInfo struct. +// It must be defined for this type because otherwise the implementation +// of the embedded ObjectInfo type will be used. +func (info *wsfsFileInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(info) } // WorkspaceFilesClient implements the files-in-workspace API. @@ -293,7 +311,22 @@ func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileIn return nil, err } - info, err := w.workspaceClient.Workspace.GetStatusByPath(ctx, absPath) + var stat wsfsFileInfo + + // Perform bespoke API call because "return_export_info" is not exposed by the SDK. + // We need "repos_export_format" to determine if the file is a py or a ipynb notebook. 
+ // This is not exposed by the SDK so we need to make a direct API call. + err = w.apiClient.Do( + ctx, + http.MethodGet, + "/api/2.0/workspace/get-status", + nil, + map[string]string{ + "path": absPath, + "return_export_info": "true", + }, + &stat, + ) if err != nil { // If we got an API error we deal with it below. var aerr *apierr.APIError @@ -307,5 +340,5 @@ func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileIn } } - return wsfsFileInfo{*info}, nil + return stat, nil } diff --git a/libs/filer/workspace_files_client_test.go b/libs/filer/workspace_files_client_test.go index 4e9537641..650b5be68 100644 --- a/libs/filer/workspace_files_client_test.go +++ b/libs/filer/workspace_files_client_test.go @@ -1,8 +1,10 @@ package filer import ( + "encoding/json" "io/fs" "testing" + "time" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" @@ -54,3 +56,40 @@ func TestWorkspaceFilesDirEntry(t *testing.T) { assert.False(t, i1.IsDir()) assert.True(t, i2.IsDir()) } + +func TestWorkspaceFilesClient_wsfsUnmarshal(t *testing.T) { + payload := ` + { + "created_at": 1671030805916, + "language": "PYTHON", + "modified_at": 1671032235392, + "object_id": 795822750063438, + "object_type": "NOTEBOOK", + "path": "/some/path/to/a/notebook", + "repos_export_format": "SOURCE", + "resource_id": "795822750063438" + } + ` + + var info wsfsFileInfo + err := json.Unmarshal([]byte(payload), &info) + require.NoError(t, err) + + // Fields in the object info. + assert.Equal(t, int64(1671030805916), info.CreatedAt) + assert.Equal(t, workspace.LanguagePython, info.Language) + assert.Equal(t, int64(1671032235392), info.ModifiedAt) + assert.Equal(t, int64(795822750063438), info.ObjectId) + assert.Equal(t, workspace.ObjectTypeNotebook, info.ObjectType) + assert.Equal(t, "/some/path/to/a/notebook", info.Path) + assert.Equal(t, workspace.ExportFormatSource, info.ReposExportFormat) + assert.Equal(t, "795822750063438", info.ResourceId) + + // Functions for fs.FileInfo. + assert.Equal(t, "notebook", info.Name()) + assert.Equal(t, int64(0), info.Size()) + assert.Equal(t, fs.ModePerm, info.Mode()) + assert.Equal(t, time.UnixMilli(1671032235392), info.ModTime()) + assert.False(t, info.IsDir()) + assert.NotNil(t, info.Sys()) +} diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index 3ce6913af..a872dcc65 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -6,22 +6,17 @@ import ( "fmt" "io" "io/fs" - "net/http" "path" "strings" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/notebook" "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" - "github.com/databricks/databricks-sdk-go/client" - "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/workspace" ) type workspaceFilesExtensionsClient struct { workspaceClient *databricks.WorkspaceClient - apiClient *client.DatabricksClient wsfs Filer root string @@ -35,64 +30,20 @@ var extensionsToLanguages = map[string]workspace.Language{ ".ipynb": workspace.LanguagePython, } -// workspaceFileStatus defines a custom response body for the "/api/2.0/workspace/get-status" API. -// The "repos_export_format" field is not exposed by the SDK. type workspaceFileStatus struct { - *workspace.ObjectInfo - - // The export format of the notebook. This is not exposed by the SDK. 
- ReposExportFormat workspace.ExportFormat `json:"repos_export_format,omitempty"` + wsfsFileInfo // Name of the file to be used in any API calls made using the workspace files // filer. For notebooks this path does not include the extension. nameForWorkspaceAPI string } -// A custom unmarsaller for the workspaceFileStatus struct. This is needed because -// workspaceFileStatus embeds the workspace.ObjectInfo which itself has a custom -// unmarshaller. -// If a custom unmarshaller is not provided extra fields like ReposExportFormat -// will not have values set. -func (s *workspaceFileStatus) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s *workspaceFileStatus) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -func (w *workspaceFilesExtensionsClient) stat(ctx context.Context, name string) (*workspaceFileStatus, error) { - stat := &workspaceFileStatus{ - nameForWorkspaceAPI: name, - } - - // Perform bespoke API call because "return_export_info" is not exposed by the SDK. - // We need "repos_export_format" to determine if the file is a py or a ipynb notebook. - // This is not exposed by the SDK so we need to make a direct API call. - err := w.apiClient.Do( - ctx, - http.MethodGet, - "/api/2.0/workspace/get-status", - nil, - map[string]string{ - "path": path.Join(w.root, name), - "return_export_info": "true", - }, - stat, - ) +func (w *workspaceFilesExtensionsClient) stat(ctx context.Context, name string) (wsfsFileInfo, error) { + info, err := w.wsfs.Stat(ctx, name) if err != nil { - // If we got an API error we deal with it below. - var aerr *apierr.APIError - if !errors.As(err, &aerr) { - return nil, err - } - - // This API returns a 404 if the specified path does not exist. - if aerr.StatusCode == http.StatusNotFound { - return nil, FileDoesNotExistError{path.Join(w.root, name)} - } + return wsfsFileInfo{}, err } - return stat, err + return info.(wsfsFileInfo), err } // This function returns the stat for the provided notebook. The stat object itself contains the path @@ -146,7 +97,10 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex // Modify the stat object path to include the extension. This stat object will be used // to return the fs.FileInfo object in the stat method. stat.Path = stat.Path + ext - return stat, nil + return &workspaceFileStatus{ + wsfsFileInfo: stat, + nameForWorkspaceAPI: nameWithoutExt, + }, nil } func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx context.Context, name string) (*workspaceFileStatus, error) { @@ -162,7 +116,7 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx con } // Get the extension for the notebook. - ext := notebook.GetExtensionByLanguage(stat.ObjectInfo) + ext := notebook.GetExtensionByLanguage(&stat.ObjectInfo) // If the notebook was exported as a Jupyter notebook, the extension should be .ipynb. if stat.Language == workspace.LanguagePython && stat.ReposExportFormat == workspace.ExportFormatJupyter { @@ -172,7 +126,10 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx con // Modify the stat object path to include the extension. This stat object will be used // to return the fs.DirEntry object in the ReadDir method. 
stat.Path = stat.Path + ext - return stat, nil + return &workspaceFileStatus{ + wsfsFileInfo: stat, + nameForWorkspaceAPI: name, + }, nil } type DuplicatePathError struct { @@ -200,11 +157,6 @@ func (e DuplicatePathError) Error() string { // errors for namespace clashes (e.g. a file and a notebook or a directory and a notebook). // Thus users of these methods should be careful to avoid such clashes. func NewWorkspaceFilesExtensionsClient(w *databricks.WorkspaceClient, root string) (Filer, error) { - apiClient, err := client.New(w.Config) - if err != nil { - return nil, err - } - filer, err := NewWorkspaceFilesClient(w, root) if err != nil { return nil, err @@ -212,7 +164,6 @@ func NewWorkspaceFilesExtensionsClient(w *databricks.WorkspaceClient, root strin return &workspaceFilesExtensionsClient{ workspaceClient: w, - apiClient: apiClient, wsfs: filer, root: root, @@ -240,7 +191,7 @@ func (w *workspaceFilesExtensionsClient) ReadDir(ctx context.Context, name strin return nil, err } // Replace the entry with the new entry that includes the extension. - entries[i] = wsfsDirEntry{wsfsFileInfo{oi: *stat.ObjectInfo}} + entries[i] = wsfsDirEntry{wsfsFileInfo{ObjectInfo: stat.ObjectInfo}} } // Error if we have seen this path before in the current directory. @@ -331,7 +282,7 @@ func (w *workspaceFilesExtensionsClient) Stat(ctx context.Context, name string) return nil, err } - return wsfsFileInfo{oi: *stat.ObjectInfo}, nil + return wsfsFileInfo{ObjectInfo: stat.ObjectInfo}, nil } return info, err From d30c4c730d1e0b674a14471a401e9b0d07d64dd7 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Mon, 8 Jul 2024 15:32:56 +0200 Subject: [PATCH 273/286] Add new template (#1578) ## Changes Add a new hidden experimental template ## Tests Tested manually --- cmd/bundle/init.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index c8c59c149..c25391577 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -49,6 +49,12 @@ var nativeTemplates = []nativeTemplate{ description: "The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)", aliases: []string{"mlops-stack"}, }, + { + name: "default-pydabs", + gitUrl: "https://databricks.github.io/workflows-authoring-toolkit/pydabs-template.git", + hidden: true, + description: "The default PyDABs template", + }, { name: customTemplate, description: "Bring your own template", From 056e2af743dda2f50e4d4b45764f020ef2c26e6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jul 2024 11:01:11 +0200 Subject: [PATCH 274/286] Bump golang.org/x/mod from 0.18.0 to 0.19.0 (#1576) Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.18.0 to 0.19.0.
Commits:
  • d58be1c sumdb/tlog: set the hash of the empty tree according to RFC 6962
  • 232e49f Revert "module: add COM0 and LPT0 to badWindowsNames"
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 385a93b09..175591d23 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/mod v0.18.0 + golang.org/x/mod v0.19.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 golang.org/x/term v0.21.0 diff --git a/go.sum b/go.sum index 864b7919b..2121224bf 100644 --- a/go.sum +++ b/go.sum @@ -180,8 +180,8 @@ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From 4d13c7fbe3525dabc9706c54dbe661511b4c5441 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jul 2024 11:01:30 +0200 Subject: [PATCH 275/286] Bump golang.org/x/term from 0.21.0 to 0.22.0 (#1577) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.21.0 to 0.22.0.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 175591d23..ce7ad0c1e 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/mod v0.19.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 - golang.org/x/term v0.21.0 + golang.org/x/term v0.22.0 golang.org/x/text v0.16.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 @@ -61,7 +61,7 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.23.0 // indirect golang.org/x/net v0.25.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect diff --git a/go.sum b/go.sum index 2121224bf..eb7a87a89 100644 --- a/go.sum +++ b/go.sum @@ -208,10 +208,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= From 8b468b423ff1166f104211548f08a5ab941732e0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 9 Jul 2024 13:12:42 +0200 Subject: [PATCH 276/286] Change SetVariables mutator to mutate dynamic configuration instead (#1573) ## Changes Previously `SetVariables` mutator mutated typed configuration by using `v.Set` for variables. This lead to variables `value` field not having location information. By using dynamic configuration mutation, we keep the same functionality but also preserve location information for value when it's set from default. 
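A condensed sketch of the pattern (the package and function names are illustrative; this is not the exact mutator code): given the dynamic value of a single variable, copying `default` into `value` through the `dyn` API keeps the source location attached to the assigned value:

```go
package mutatorsketch

import "github.com/databricks/cli/libs/dyn"

// assignDefault copies a variable's "default" into its "value" on the dynamic
// value, so the assigned value keeps the file/line it was loaded from.
func assignDefault(v dyn.Value) (dyn.Value, error) {
	vDefault, err := dyn.Get(v, "default")
	if err != nil {
		return dyn.InvalidValue, err
	}
	// dyn.Set returns an updated copy of v; vDefault carries its original
	// source location, which a typed variable.Set(...) call would have lost.
	return dyn.Set(v, "value", vDefault)
}
```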
Fixes #1568 #1538 ## Tests Added unit tests --- bundle/config/mutator/set_variables.go | 59 ++++++++++------- bundle/config/mutator/set_variables_test.go | 55 ++++++++++++---- bundle/config/mutator/translate_paths_test.go | 64 +++++++++++++++++++ 3 files changed, 143 insertions(+), 35 deletions(-) diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index b3a9cf400..47ce2ad03 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -2,10 +2,12 @@ package mutator import ( "context" + "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/env" ) @@ -21,52 +23,63 @@ func (m *setVariables) Name() string { return "SetVariables" } -func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Diagnostics { +func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable, name string) (dyn.Value, error) { // case: variable already has value initialized, so skip - if v.HasValue() { - return nil + if variable.HasValue() { + return v, nil } // case: read and set variable value from process environment envVarName := bundleVarPrefix + name if val, ok := env.Lookup(ctx, envVarName); ok { - if v.IsComplex() { - return diag.Errorf(`setting via environment variables (%s) is not supported for complex variable %s`, envVarName, name) + if variable.IsComplex() { + return dyn.InvalidValue, fmt.Errorf(`setting via environment variables (%s) is not supported for complex variable %s`, envVarName, name) } - err := v.Set(val) + v, err := dyn.Set(v, "value", dyn.V(val)) if err != nil { - return diag.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err) + return dyn.InvalidValue, fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err) } - return nil + return v, nil } // case: Defined a variable for named lookup for a resource // It will be resolved later in ResolveResourceReferences mutator - if v.Lookup != nil { - return nil + if variable.Lookup != nil { + return v, nil } // case: Set the variable to its default value - if v.HasDefault() { - err := v.Set(v.Default) + if variable.HasDefault() { + vDefault, err := dyn.Get(v, "default") if err != nil { - return diag.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, v.Default, name, err) + return dyn.InvalidValue, fmt.Errorf(`failed to get default value from config "%s" for variable %s with error: %v`, variable.Default, name, err) } - return nil + + v, err := dyn.Set(v, "value", vDefault) + if err != nil { + return dyn.InvalidValue, fmt.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, variable.Default, name, err) + } + return v, nil } // We should have had a value to set for the variable at this point. - return diag.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) + return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. 
Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) + } func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - var diags diag.Diagnostics - for name, variable := range b.Config.Variables { - diags = diags.Extend(setVariable(ctx, variable, name)) - if diags.HasError() { - return diags - } - } - return diags + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "variables", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) { + name := p[1].Key() + v, ok := b.Config.Variables[name] + if !ok { + return dyn.InvalidValue, fmt.Errorf(`variable "%s" is not defined`, name) + } + + return setVariable(ctx, variable, v, name) + })) + }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index 65dedee97..d9719793f 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -7,6 +7,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,9 +22,14 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { // set value for variable as an environment variable t.Setenv("BUNDLE_VAR_foo", "process-env") + v, err := convert.FromTyped(variable, dyn.NilValue) + require.NoError(t, err) - diags := setVariable(context.Background(), &variable, "foo") - require.NoError(t, diags.Error()) + v, err = setVariable(context.Background(), v, &variable, "foo") + require.NoError(t, err) + + err = convert.ToTyped(&variable, v) + require.NoError(t, err) assert.Equal(t, variable.Value, "process-env") } @@ -33,8 +40,14 @@ func TestSetVariableUsingDefaultValue(t *testing.T) { Default: defaultVal, } - diags := setVariable(context.Background(), &variable, "foo") - require.NoError(t, diags.Error()) + v, err := convert.FromTyped(variable, dyn.NilValue) + require.NoError(t, err) + + v, err = setVariable(context.Background(), v, &variable, "foo") + require.NoError(t, err) + + err = convert.ToTyped(&variable, v) + require.NoError(t, err) assert.Equal(t, variable.Value, "default") } @@ -49,8 +62,14 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { // since a value is already assigned to the variable, it would not be overridden // by the default value - diags := setVariable(context.Background(), &variable, "foo") - require.NoError(t, diags.Error()) + v, err := convert.FromTyped(variable, dyn.NilValue) + require.NoError(t, err) + + v, err = setVariable(context.Background(), v, &variable, "foo") + require.NoError(t, err) + + err = convert.ToTyped(&variable, v) + require.NoError(t, err) assert.Equal(t, variable.Value, "assigned-value") } @@ -68,8 +87,14 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { // since a value is already assigned to the variable, it would not be overridden // by the value from environment - diags := setVariable(context.Background(), &variable, "foo") - require.NoError(t, diags.Error()) + v, err := convert.FromTyped(variable, dyn.NilValue) + require.NoError(t, err) + + v, err = setVariable(context.Background(), v, &variable, "foo") + require.NoError(t, err) + + err = convert.ToTyped(&variable, v) + require.NoError(t, err) assert.Equal(t, 
variable.Value, "assigned-value") } @@ -79,8 +104,11 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) { } // fails because we could not resolve a value for the variable - diags := setVariable(context.Background(), &variable, "foo") - assert.ErrorContains(t, diags.Error(), "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable") + v, err := convert.FromTyped(variable, dyn.NilValue) + require.NoError(t, err) + + _, err = setVariable(context.Background(), v, &variable, "foo") + assert.ErrorContains(t, err, "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable") } func TestSetVariablesMutator(t *testing.T) { @@ -126,6 +154,9 @@ func TestSetComplexVariablesViaEnvVariablesIsNotAllowed(t *testing.T) { // set value for variable as an environment variable t.Setenv("BUNDLE_VAR_foo", "process-env") - diags := setVariable(context.Background(), &variable, "foo") - assert.ErrorContains(t, diags.Error(), "setting via environment variables (BUNDLE_VAR_foo) is not supported for complex variable foo") + v, err := convert.FromTyped(variable, dyn.NilValue) + require.NoError(t, err) + + _, err = setVariable(context.Background(), v, &variable, "foo") + assert.ErrorContains(t, err, "setting via environment variables (BUNDLE_VAR_foo) is not supported for complex variable foo") } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 8476ee38a..780a540df 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -11,7 +11,10 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -708,3 +711,64 @@ func TestTranslatePathJobEnvironments(t *testing.T) { assert.Equal(t, "simplejson", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[2]) assert.Equal(t, "/Workspace/Users/foo@bar.com/test.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[3]) } + +func TestTranslatePathWithComplexVariables(t *testing.T) { + dir := t.TempDir() + b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "cluster_libraries": { + Type: variable.VariableTypeComplex, + Default: [](map[string]string){ + { + "whl": "./local/whl.whl", + }, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "test", + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "variables", filepath.Join(dir, "variables/variables.yml")) + bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml")) + + ctx := context.Background() + // Assign the variables to the dynamic configuration. 
+ diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + p := dyn.MustPathFromString("resources.jobs.job.tasks[0]") + return dyn.SetByPath(v, p.Append(dyn.Key("libraries")), dyn.V("${var.cluster_libraries}")) + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, + bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferences("variables"), + mutator.TranslatePaths(), + )) + require.NoError(t, diags.Error()) + + assert.Equal( + t, + filepath.Join("variables", "local", "whl.whl"), + b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl, + ) +} From 5bc5c3c26acf671026217b71bd52afb72cf3a472 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 9 Jul 2024 20:38:38 +0530 Subject: [PATCH 277/286] Return early in bundle destroy if no deployment exists (#1581) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR: 1. Moves the if mutator to the bundle package, to live with all-time greats such as `bundle.Seq` and `bundle.Defer`. Also adds unit tests. 2. `bundle destroy` now returns early if `root_path` does not exist. We do this by leveraging a `bundle.If` condition. ## Tests Unit tests and manually. Here's an example of what it'll look like once the bundle is destroyed. ``` ➜ bundle-playground git:(master) ✗ cli bundle destroy No active deployment found to destroy! ``` I would have added some e2e coverage for this as well, but the `cobraTestRunner.Run()` method does not seem to return stdout/stderr logs correctly. We can probably punt looking into it. --- bundle/config/mutator/if.go | 36 ------------------------- bundle/if.go | 40 ++++++++++++++++++++++++++++ bundle/if_test.go | 53 +++++++++++++++++++++++++++++++++++++ bundle/phases/destroy.go | 29 ++++++++++++++++++-- bundle/python/transform.go | 8 +++--- 5 files changed, 125 insertions(+), 41 deletions(-) delete mode 100644 bundle/config/mutator/if.go create mode 100644 bundle/if.go create mode 100644 bundle/if_test.go diff --git a/bundle/config/mutator/if.go b/bundle/config/mutator/if.go deleted file mode 100644 index 1b7856b3c..000000000 --- a/bundle/config/mutator/if.go +++ /dev/null @@ -1,36 +0,0 @@ -package mutator - -import ( - "context" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/libs/diag" -) - -type ifMutator struct { - condition func(*bundle.Bundle) bool - onTrueMutator bundle.Mutator - onFalseMutator bundle.Mutator -} - -func If( - condition func(*bundle.Bundle) bool, - onTrueMutator bundle.Mutator, - onFalseMutator bundle.Mutator, -) bundle.Mutator { - return &ifMutator{ - condition, onTrueMutator, onFalseMutator, - } -} - -func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - if m.condition(b) { - return bundle.Apply(ctx, b, m.onTrueMutator) - } else { - return bundle.Apply(ctx, b, m.onFalseMutator) - } -} - -func (m *ifMutator) Name() string { - return "If" -} diff --git a/bundle/if.go b/bundle/if.go new file mode 100644 index 000000000..bad1d72d2 --- /dev/null +++ b/bundle/if.go @@ -0,0 +1,40 @@ +package bundle + +import ( + "context" + + "github.com/databricks/cli/libs/diag" +) + +type ifMutator struct { + condition func(context.Context, *Bundle) (bool, error) + onTrueMutator Mutator + onFalseMutator Mutator +} + +func If( + condition func(context.Context, *Bundle) (bool, error), + onTrueMutator 
Mutator, + onFalseMutator Mutator, +) Mutator { + return &ifMutator{ + condition, onTrueMutator, onFalseMutator, + } +} + +func (m *ifMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { + v, err := m.condition(ctx, b) + if err != nil { + return diag.FromErr(err) + } + + if v { + return Apply(ctx, b, m.onTrueMutator) + } else { + return Apply(ctx, b, m.onFalseMutator) + } +} + +func (m *ifMutator) Name() string { + return "If" +} diff --git a/bundle/if_test.go b/bundle/if_test.go new file mode 100644 index 000000000..b3fc0b9d9 --- /dev/null +++ b/bundle/if_test.go @@ -0,0 +1,53 @@ +package bundle + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIfMutatorTrue(t *testing.T) { + m1 := &testMutator{} + m2 := &testMutator{} + ifMutator := If(func(context.Context, *Bundle) (bool, error) { + return true, nil + }, m1, m2) + + b := &Bundle{} + diags := Apply(context.Background(), b, ifMutator) + assert.NoError(t, diags.Error()) + + assert.Equal(t, 1, m1.applyCalled) + assert.Equal(t, 0, m2.applyCalled) +} + +func TestIfMutatorFalse(t *testing.T) { + m1 := &testMutator{} + m2 := &testMutator{} + ifMutator := If(func(context.Context, *Bundle) (bool, error) { + return false, nil + }, m1, m2) + + b := &Bundle{} + diags := Apply(context.Background(), b, ifMutator) + assert.NoError(t, diags.Error()) + + assert.Equal(t, 0, m1.applyCalled) + assert.Equal(t, 1, m2.applyCalled) +} + +func TestIfMutatorError(t *testing.T) { + m1 := &testMutator{} + m2 := &testMutator{} + ifMutator := If(func(context.Context, *Bundle) (bool, error) { + return true, assert.AnError + }, m1, m2) + + b := &Bundle{} + diags := Apply(context.Background(), b, ifMutator) + assert.Error(t, diags.Error()) + + assert.Equal(t, 0, m1.applyCalled) + assert.Equal(t, 0, m2.applyCalled) +} diff --git a/bundle/phases/destroy.go b/bundle/phases/destroy.go index f974a0565..f1beace84 100644 --- a/bundle/phases/destroy.go +++ b/bundle/phases/destroy.go @@ -1,15 +1,33 @@ package phases import ( + "context" + "errors" + "net/http" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/bundle/deploy/lock" "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go/apierr" ) +func assertRootPathExists(ctx context.Context, b *bundle.Bundle) (bool, error) { + w := b.WorkspaceClient() + _, err := w.Workspace.GetStatusByPath(ctx, b.Config.Workspace.RootPath) + + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.StatusCode == http.StatusNotFound { + log.Infof(ctx, "Root path does not exist: %s", b.Config.Workspace.RootPath) + return false, nil + } + + return true, err +} + // The destroy phase deletes artifacts and resources. func Destroy() bundle.Mutator { - destroyMutator := bundle.Seq( lock.Acquire(), bundle.Defer( @@ -29,6 +47,13 @@ func Destroy() bundle.Mutator { return newPhase( "destroy", - []bundle.Mutator{destroyMutator}, + []bundle.Mutator{ + // Only run deploy mutator if root path exists. 
+ bundle.If( + assertRootPathExists, + destroyMutator, + bundle.LogString("No active deployment found to destroy!"), + ), + }, ) } diff --git a/bundle/python/transform.go b/bundle/python/transform.go index 457b45f78..9d3b1ab6a 100644 --- a/bundle/python/transform.go +++ b/bundle/python/transform.go @@ -1,6 +1,7 @@ package python import ( + "context" "fmt" "strconv" "strings" @@ -63,9 +64,10 @@ dbutils.notebook.exit(s) // which installs uploaded wheels using %pip and then calling corresponding // entry point. func TransformWheelTask() bundle.Mutator { - return mutator.If( - func(b *bundle.Bundle) bool { - return b.Config.Experimental != nil && b.Config.Experimental.PythonWheelWrapper + return bundle.If( + func(_ context.Context, b *bundle.Bundle) (bool, error) { + res := b.Config.Experimental != nil && b.Config.Experimental.PythonWheelWrapper + return res, nil }, mutator.NewTrampoline( "python_wheel", From 8f56ca39a26296edabe141a9d974b971ee728849 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 10 Jul 2024 08:37:47 +0200 Subject: [PATCH 278/286] Let notebook detection code use underlying metadata if available (#1574) ## Changes If we're using a `vfs.Path` backed by a workspace filesystem filer, we have access to the `workspace.ObjectInfo` value for every file. By providing access to this value we can use it directly and avoid reading the first line of the underlying file. A follow-up change will implement the interface defined in this change for the workspace filesystem filer. ## Tests Unit tests. --- libs/notebook/detect.go | 76 ++++++++++++++++++++++++++++++----- libs/notebook/detect_test.go | 18 +++++++++ libs/notebook/fakefs_test.go | 77 ++++++++++++++++++++++++++++++++++++ 3 files changed, 161 insertions(+), 10 deletions(-) create mode 100644 libs/notebook/fakefs_test.go diff --git a/libs/notebook/detect.go b/libs/notebook/detect.go index 0b7c04d6d..582a88479 100644 --- a/libs/notebook/detect.go +++ b/libs/notebook/detect.go @@ -12,27 +12,69 @@ import ( "github.com/databricks/databricks-sdk-go/service/workspace" ) +// FileInfoWithWorkspaceObjectInfo is an interface implemented by [fs.FileInfo] values that +// contain a file's underlying [workspace.ObjectInfo]. +// +// This may be the case when working with a [filer.Filer] backed by the workspace API. +// For these files we do not need to read a file's header to know if it is a notebook; +// we can use the [workspace.ObjectInfo] value directly. +type FileInfoWithWorkspaceObjectInfo interface { + WorkspaceObjectInfo() workspace.ObjectInfo +} + // Maximum length in bytes of the notebook header. const headerLength = 32 -// readHeader reads the first N bytes from a file. -func readHeader(fsys fs.FS, name string) ([]byte, error) { +// file wraps an fs.File and implements a few helper methods such that +// they don't need to be inlined in the [DetectWithFS] function below. +type file struct { + f fs.File +} + +func openFile(fsys fs.FS, name string) (*file, error) { f, err := fsys.Open(name) if err != nil { return nil, err } - defer f.Close() + return &file{f: f}, nil +} +func (f file) close() error { + return f.f.Close() +} + +func (f file) readHeader() (string, error) { // Scan header line with some padding. var buf = make([]byte, headerLength) - n, err := f.Read([]byte(buf)) + n, err := f.f.Read([]byte(buf)) if err != nil && err != io.EOF { - return nil, err + return "", err } // Trim buffer to actual read bytes. - return buf[:n], nil + buf = buf[:n] + + // Read the first line from the buffer. 
+ scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Scan() + return scanner.Text(), nil +} + +// getObjectInfo returns the [workspace.ObjectInfo] for the file if it is +// part of the [fs.FileInfo] value returned by the [fs.Stat] call. +func (f file) getObjectInfo() (oi workspace.ObjectInfo, ok bool, err error) { + stat, err := f.f.Stat() + if err != nil { + return workspace.ObjectInfo{}, false, err + } + + // Use object info if available. + if i, ok := stat.(FileInfoWithWorkspaceObjectInfo); ok { + return i.WorkspaceObjectInfo(), true, nil + } + + return workspace.ObjectInfo{}, false, nil } // Detect returns whether the file at path is a Databricks notebook. @@ -40,13 +82,27 @@ func readHeader(fsys fs.FS, name string) ([]byte, error) { func DetectWithFS(fsys fs.FS, name string) (notebook bool, language workspace.Language, err error) { header := "" - buf, err := readHeader(fsys, name) + f, err := openFile(fsys, name) + if err != nil { + return false, "", err + } + + defer f.close() + + // Use object info if available. + oi, ok, err := f.getObjectInfo() + if err != nil { + return false, "", err + } + if ok { + return oi.ObjectType == workspace.ObjectTypeNotebook, oi.Language, nil + } + + // Read the first line of the file. + fileHeader, err := f.readHeader() if err != nil { return false, "", err } - scanner := bufio.NewScanner(bytes.NewReader(buf)) - scanner.Scan() - fileHeader := scanner.Text() // Determine which header to expect based on filename extension. ext := strings.ToLower(filepath.Ext(name)) diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go index fd3337579..ad89d6dd5 100644 --- a/libs/notebook/detect_test.go +++ b/libs/notebook/detect_test.go @@ -99,3 +99,21 @@ func TestDetectFileWithLongHeader(t *testing.T) { require.NoError(t, err) assert.False(t, nb) } + +func TestDetectWithObjectInfo(t *testing.T) { + fakeFS := &fakeFS{ + fakeFile{ + fakeFileInfo{ + workspace.ObjectInfo{ + ObjectType: workspace.ObjectTypeNotebook, + Language: workspace.LanguagePython, + }, + }, + }, + } + + nb, lang, err := DetectWithFS(fakeFS, "doesntmatter") + require.NoError(t, err) + assert.True(t, nb) + assert.Equal(t, workspace.LanguagePython, lang) +} diff --git a/libs/notebook/fakefs_test.go b/libs/notebook/fakefs_test.go new file mode 100644 index 000000000..4ac135dd4 --- /dev/null +++ b/libs/notebook/fakefs_test.go @@ -0,0 +1,77 @@ +package notebook + +import ( + "fmt" + "io/fs" + "time" + + "github.com/databricks/databricks-sdk-go/service/workspace" +) + +type fakeFS struct { + fakeFile +} + +type fakeFile struct { + fakeFileInfo +} + +func (f fakeFile) Close() error { + return nil +} + +func (f fakeFile) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("not implemented") +} + +func (f fakeFile) Stat() (fs.FileInfo, error) { + return f.fakeFileInfo, nil +} + +type fakeFileInfo struct { + oi workspace.ObjectInfo +} + +func (f fakeFileInfo) WorkspaceObjectInfo() workspace.ObjectInfo { + return f.oi +} + +func (f fakeFileInfo) Name() string { + return "" +} + +func (f fakeFileInfo) Size() int64 { + return 0 +} + +func (f fakeFileInfo) Mode() fs.FileMode { + return 0 +} + +func (f fakeFileInfo) ModTime() time.Time { + return time.Time{} +} + +func (f fakeFileInfo) IsDir() bool { + return false +} + +func (f fakeFileInfo) Sys() any { + return nil +} + +func (f fakeFS) Open(name string) (fs.File, error) { + return f.fakeFile, nil +} + +func (f fakeFS) Stat(name string) (fs.FileInfo, error) { + panic("not implemented") +} + +func (f fakeFS) ReadDir(name string) 
([]fs.DirEntry, error) { + panic("not implemented") +} + +func (f fakeFS) ReadFile(name string) ([]byte, error) { + panic("not implemented") +} From 25737bbb5d7baf44d6bfe06cea32df8593a67c51 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Wed, 10 Jul 2024 08:38:06 +0200 Subject: [PATCH 279/286] Add regression tests for CLI error output (#1566) ## Changes Add regression tests for https://github.com/databricks/cli/issues/1563 We test 2 code paths: - if there is an error, we can print to stderr - if there is a valid output, we can print to stdout We should also consider adding black-box tests that will run the CLI binary as a black box and inspect its output to stderr/stdout. ## Tests Unit tests --- cmd/root/root.go | 7 ++----- internal/helpers.go | 34 ++++++++++++++++++++++---------- internal/unknown_command_test.go | 15 ++++++++++++++ main.go | 7 ++++++- 4 files changed, 47 insertions(+), 16 deletions(-) create mode 100644 internal/unknown_command_test.go diff --git a/cmd/root/root.go b/cmd/root/root.go index 61baa4da0..eda873d12 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -92,9 +92,8 @@ func flagErrorFunc(c *cobra.Command, err error) error { // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. -func Execute(cmd *cobra.Command) { +func Execute(ctx context.Context, cmd *cobra.Command) error { // TODO: deferred panic recovery - ctx := context.Background() // Run the command cmd, err := cmd.ExecuteContextC(ctx) @@ -118,7 +117,5 @@ func Execute(cmd *cobra.Command) { } } - if err != nil { - os.Exit(1) - } + return err } diff --git a/internal/helpers.go b/internal/helpers.go index 3923e7e1e..67a258ba4 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -19,6 +19,9 @@ import ( "testing" "time" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/cli/cmd" _ "github.com/databricks/cli/cmd/version" "github.com/databricks/cli/libs/cmdio" @@ -105,7 +108,12 @@ func (t *cobraTestRunner) registerFlagCleanup(c *cobra.Command) { // Find target command that will be run. Example: if the command run is `databricks fs cp`, // target command corresponds to `cp` targetCmd, _, err := c.Find(t.args) - require.NoError(t, err) + if err != nil && strings.HasPrefix(err.Error(), "unknown command") { + // even if command is unknown, we can proceed + require.NotNil(t, targetCmd) + } else { + require.NoError(t, err) + } // Force initialization of default flags. // These are initialized by cobra at execution time and would otherwise @@ -169,22 +177,28 @@ func (t *cobraTestRunner) RunBackground() { var stdoutW, stderrW io.WriteCloser stdoutR, stdoutW = io.Pipe() stderrR, stderrW = io.Pipe() - root := cmd.New(t.ctx) - root.SetOut(stdoutW) - root.SetErr(stderrW) - root.SetArgs(t.args) + ctx := cmdio.NewContext(t.ctx, &cmdio.Logger{ + Mode: flags.ModeAppend, + Reader: bufio.Reader{}, + Writer: stderrW, + }) + + cli := cmd.New(ctx) + cli.SetOut(stdoutW) + cli.SetErr(stderrW) + cli.SetArgs(t.args) if t.stdinW != nil { - root.SetIn(t.stdinR) + cli.SetIn(t.stdinR) } // Register cleanup function to restore flags to their original values // once test has been executed. 
This is needed because flag values reside // in a global singleton data-structure, and thus subsequent tests might // otherwise interfere with each other - t.registerFlagCleanup(root) + t.registerFlagCleanup(cli) errch := make(chan error) - ctx, cancel := context.WithCancel(t.ctx) + ctx, cancel := context.WithCancel(ctx) // Tee stdout/stderr to buffers. stdoutR = io.TeeReader(stdoutR, &t.stdout) @@ -197,7 +211,7 @@ func (t *cobraTestRunner) RunBackground() { // Run command in background. go func() { - cmd, err := root.ExecuteContextC(ctx) + err := root.Execute(ctx, cli) if err != nil { t.Logf("Error running command: %s", err) } @@ -230,7 +244,7 @@ func (t *cobraTestRunner) RunBackground() { // These commands are globals so we have to clean up to the best of our ability after each run. // See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066 //lint:ignore SA1012 cobra sets the context and doesn't clear it - cmd.SetContext(nil) + cli.SetContext(nil) // Make caller aware of error. errch <- err diff --git a/internal/unknown_command_test.go b/internal/unknown_command_test.go new file mode 100644 index 000000000..62b84027f --- /dev/null +++ b/internal/unknown_command_test.go @@ -0,0 +1,15 @@ +package internal + +import ( + "testing" + + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +func TestUnknownCommand(t *testing.T) { + stdout, stderr, err := RequireErrorRun(t, "unknown-command") + + assert.Error(t, err, "unknown command", `unknown command "unknown-command" for "databricks"`) + assert.Equal(t, "", stdout.String()) + assert.Contains(t, stderr.String(), "unknown command") +} diff --git a/main.go b/main.go index 8c8516d9d..c568e6adb 100644 --- a/main.go +++ b/main.go @@ -2,11 +2,16 @@ package main import ( "context" + "os" "github.com/databricks/cli/cmd" "github.com/databricks/cli/cmd/root" ) func main() { - root.Execute(cmd.New(context.Background())) + ctx := context.Background() + err := root.Execute(ctx, cmd.New(ctx)) + if err != nil { + os.Exit(1) + } } From 1da04a43182c038ff9f4c1da87e5dd2a27025e38 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 10 Jul 2024 12:27:27 +0530 Subject: [PATCH 280/286] Remove schema override for variable default value (#1536) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR: 1. Removes the custom added in https://github.com/databricks/cli/pull/1396/files for `variables.*.default`. It's no longer needed because with complex variables (https://github.com/databricks/cli/pull/1467) `default` has a type of any. 2. Retains, and extends the override on `targets.*.variables.*`. Target override values can now be complex objects, not just primitive values. ## Tests Manually Before: Only primitive types were allowed. Screenshot 2024-06-27 at 3 58 34 PM After: An empty JSON schema is generated. All YAML values are acceptable. Screenshot 2024-06-27 at 3 57 15 PM --- cmd/bundle/schema.go | 54 +++----------------------------------------- 1 file changed, 3 insertions(+), 51 deletions(-) diff --git a/cmd/bundle/schema.go b/cmd/bundle/schema.go index b0d6b3dd5..813aebbae 100644 --- a/cmd/bundle/schema.go +++ b/cmd/bundle/schema.go @@ -11,54 +11,6 @@ import ( "github.com/spf13/cobra" ) -func overrideVariables(s *jsonschema.Schema) error { - // Override schema for default values to allow for multiple primitive types. - // These are normalized to strings when converted to the typed representation. 
- err := s.SetByPath("variables.*.default", jsonschema.Schema{ - AnyOf: []*jsonschema.Schema{ - { - Type: jsonschema.StringType, - }, - { - Type: jsonschema.BooleanType, - }, - { - Type: jsonschema.NumberType, - }, - { - Type: jsonschema.IntegerType, - }, - }, - }) - if err != nil { - return err - } - - // Override schema for variables in targets to allow just specifying the value - // along side overriding the variable definition if needed. - ns, err := s.GetByPath("variables.*") - if err != nil { - return err - } - return s.SetByPath("targets.*.variables.*", jsonschema.Schema{ - AnyOf: []*jsonschema.Schema{ - { - Type: jsonschema.StringType, - }, - { - Type: jsonschema.BooleanType, - }, - { - Type: jsonschema.NumberType, - }, - { - Type: jsonschema.IntegerType, - }, - &ns, - }, - }) -} - func newSchemaCommand() *cobra.Command { cmd := &cobra.Command{ Use: "schema", @@ -79,9 +31,9 @@ func newSchemaCommand() *cobra.Command { return err } - // Override schema for variables to take into account normalization of default - // variable values and variable overrides in a target. - err = overrideVariables(schema) + // Target variable value overrides can be primitives, maps or sequences. + // Set an empty schema for them. + err = schema.SetByPath("targets.*.variables.*", jsonschema.Schema{}) if err != nil { return err } From af975ca64ba16cbad58a657d39bc6063b4d71311 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Wed, 10 Jul 2024 13:14:57 +0200 Subject: [PATCH 281/286] Print diagnostics in 'bundle deploy' (#1579) ## Changes Print diagnostics in 'bundle deploy' similar to 'bundle validate'. This way if a bundle has any errors or warnings, they are going to be easy to notice. NB: due to how we render errors, there is one extra trailing new line in output, preserved in examples below ## Example: No errors or warnings ``` % databricks bundle deploy Building default... Deploying resources... Updating deployment state... Deployment complete! ``` ## Example: Error on load ``` % databricks bundle deploy Error: Databricks CLI version constraint not satisfied. Required: >= 1337.0.0, current: 0.0.0-dev ``` ## Example: Warning on load ``` % databricks bundle deploy Building default... Deploying resources... Updating deployment state... Deployment complete! 
Warning: unknown field: foo in databricks.yml:6:1 ``` ## Example: Error + warning on load ``` % databricks bundle deploy Warning: unknown field: foo in databricks.yml:6:1 Error: something went wrong ``` ## Example: Warning on load + error in init ``` % databricks bundle deploy Warning: unknown field: foo in databricks.yml:6:1 Error: Failed to xxx in yyy.yml Detailed explanation in multiple lines ``` ## Tests Tested manually --- bundle/render/render_text_output.go | 19 +++++--- bundle/render/render_text_output_test.go | 46 ++++++++++++++++++- bundle/scripts/scripts.go | 9 +++- cmd/bundle/deploy.go | 57 ++++++++++++++---------- cmd/bundle/validate.go | 3 +- 5 files changed, 102 insertions(+), 32 deletions(-) diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go index 37ea188f7..439ae6132 100644 --- a/bundle/render/render_text_output.go +++ b/bundle/render/render_text_output.go @@ -142,7 +142,7 @@ func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) } // Make file relative to bundle root - if d.Location.File != "" { + if d.Location.File != "" && b != nil { out, err := filepath.Rel(b.RootPath, d.Location.File) // if we can't relativize the path, just use path as-is if err == nil { @@ -160,16 +160,25 @@ func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) return nil } +// RenderOptions contains options for rendering diagnostics. +type RenderOptions struct { + // variable to include leading new line + + RenderSummaryTable bool +} + // RenderTextOutput renders the diagnostics in a human-readable format. -func RenderTextOutput(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { +func RenderTextOutput(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, opts RenderOptions) error { err := renderDiagnostics(out, b, diags) if err != nil { return fmt.Errorf("failed to render diagnostics: %w", err) } - err = renderSummaryTemplate(out, b, diags) - if err != nil { - return fmt.Errorf("failed to render summary: %w", err) + if opts.RenderSummaryTable { + err = renderSummaryTemplate(out, b, diags) + if err != nil { + return fmt.Errorf("failed to render summary: %w", err) + } } return nil diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go index 4ae86ded7..b7aec8864 100644 --- a/bundle/render/render_text_output_test.go +++ b/bundle/render/render_text_output_test.go @@ -17,6 +17,7 @@ type renderTestOutputTestCase struct { name string bundle *bundle.Bundle diags diag.Diagnostics + opts RenderOptions expected string } @@ -39,6 +40,7 @@ func TestRenderTextOutput(t *testing.T) { Summary: "failed to load xxx", }, }, + opts: RenderOptions{RenderSummaryTable: true}, expected: "Error: failed to load xxx\n" + "\n" + "Found 1 error\n", @@ -47,6 +49,7 @@ func TestRenderTextOutput(t *testing.T) { name: "bundle during 'load' and 1 error", bundle: loadingBundle, diags: diag.Errorf("failed to load bundle"), + opts: RenderOptions{RenderSummaryTable: true}, expected: "Error: failed to load bundle\n" + "\n" + "Name: test-bundle\n" + @@ -58,6 +61,7 @@ func TestRenderTextOutput(t *testing.T) { name: "bundle during 'load' and 1 warning", bundle: loadingBundle, diags: diag.Warningf("failed to load bundle"), + opts: RenderOptions{RenderSummaryTable: true}, expected: "Warning: failed to load bundle\n" + "\n" + "Name: test-bundle\n" + @@ -69,6 +73,7 @@ func TestRenderTextOutput(t *testing.T) { name: "bundle during 'load' and 2 warnings", bundle: loadingBundle, diags: 
diag.Warningf("warning (1)").Extend(diag.Warningf("warning (2)")), + opts: RenderOptions{RenderSummaryTable: true}, expected: "Warning: warning (1)\n" + "\n" + "Warning: warning (2)\n" + @@ -113,6 +118,7 @@ func TestRenderTextOutput(t *testing.T) { }, }, }, + opts: RenderOptions{RenderSummaryTable: true}, expected: "Error: error (1)\n" + " in foo.py:1:1\n" + "\n" + @@ -153,6 +159,7 @@ func TestRenderTextOutput(t *testing.T) { }, }, diags: nil, + opts: RenderOptions{RenderSummaryTable: true}, expected: "Name: test-bundle\n" + "Target: test-target\n" + "Workspace:\n" + @@ -162,13 +169,50 @@ func TestRenderTextOutput(t *testing.T) { "\n" + "Validation OK!\n", }, + { + name: "nil bundle without summary with 1 error and 1 warning", + bundle: nil, + diags: diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Error, + Summary: "error (1)", + Detail: "detail (1)", + Location: dyn.Location{ + File: "foo.py", + Line: 1, + Column: 1, + }, + }, + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "warning (2)", + Detail: "detail (2)", + Location: dyn.Location{ + File: "foo.py", + Line: 3, + Column: 1, + }, + }, + }, + opts: RenderOptions{RenderSummaryTable: false}, + expected: "Error: error (1)\n" + + " in foo.py:1:1\n" + + "\n" + + "detail (1)\n" + + "\n" + + "Warning: warning (2)\n" + + " in foo.py:3:1\n" + + "\n" + + "detail (2)\n" + + "\n", + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { writer := &bytes.Buffer{} - err := RenderTextOutput(writer, tc.bundle, tc.diags) + err := RenderTextOutput(writer, tc.bundle, tc.diags, tc.opts) require.NoError(t, err) assert.Equal(t, tc.expected, writer.String()) diff --git a/bundle/scripts/scripts.go b/bundle/scripts/scripts.go index 38d204f99..629b3a8ab 100644 --- a/bundle/scripts/scripts.go +++ b/bundle/scripts/scripts.go @@ -37,7 +37,7 @@ func (m *script) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { cmd, out, err := executeHook(ctx, executor, b, m.scriptHook) if err != nil { - return diag.FromErr(err) + return diag.FromErr(fmt.Errorf("failed to execute script: %w", err)) } if cmd == nil { log.Debugf(ctx, "No script defined for %s, skipping", m.scriptHook) @@ -53,7 +53,12 @@ func (m *script) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { line, err = reader.ReadString('\n') } - return diag.FromErr(cmd.Wait()) + err = cmd.Wait() + if err != nil { + return diag.FromErr(fmt.Errorf("failed to execute script: %w", err)) + } + + return nil } func executeHook(ctx context.Context, executor *exec.Executor, b *bundle.Bundle, hook config.ScriptHook) (exec.Command, io.Reader, error) { diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 919b15a72..1232c8de5 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -2,9 +2,11 @@ package bundle import ( "context" + "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/render" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/diag" @@ -30,32 +32,41 @@ func newDeployCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() b, diags := utils.ConfigureBundleWithVariables(cmd) - if err := diags.Error(); err != nil { - return diags.Error() + + if !diags.HasError() { + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { + b.Config.Bundle.Force = force + b.Config.Bundle.Deployment.Lock.Force = forceLock + if cmd.Flag("compute-id").Changed { + 
b.Config.Bundle.ComputeID = computeID + } + + if cmd.Flag("fail-on-active-runs").Changed { + b.Config.Bundle.Deployment.FailOnActiveRuns = failOnActiveRuns + } + + return nil + }) + + diags = diags.Extend( + bundle.Apply(ctx, b, bundle.Seq( + phases.Initialize(), + phases.Build(), + phases.Deploy(), + )), + ) } - bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { - b.Config.Bundle.Force = force - b.Config.Bundle.Deployment.Lock.Force = forceLock - if cmd.Flag("compute-id").Changed { - b.Config.Bundle.ComputeID = computeID - } - - if cmd.Flag("fail-on-active-runs").Changed { - b.Config.Bundle.Deployment.FailOnActiveRuns = failOnActiveRuns - } - - return nil - }) - - diags = bundle.Apply(ctx, b, bundle.Seq( - phases.Initialize(), - phases.Build(), - phases.Deploy(), - )) - if err := diags.Error(); err != nil { - return err + renderOpts := render.RenderOptions{RenderSummaryTable: false} + err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags, renderOpts) + if err != nil { + return fmt.Errorf("failed to render output: %w", err) } + + if diags.HasError() { + return root.ErrAlreadyPrinted + } + return nil } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 59a977047..496d5d2b5 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -53,7 +53,8 @@ func newValidateCommand() *cobra.Command { switch root.OutputType(cmd) { case flags.OutputText: - err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags) + renderOpts := render.RenderOptions{RenderSummaryTable: true} + err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags, renderOpts) if err != nil { return fmt.Errorf("failed to render output: %w", err) } From 61cb0f269526ad0bc18e43f6cf17acea52cd4093 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 10 Jul 2024 14:04:59 +0200 Subject: [PATCH 282/286] Release v0.223.2 (#1587) Bundles: * Override complex variables with target overrides instead of merging ([#1567](https://github.com/databricks/cli/pull/1567)). * Rewrite local path for libraries in foreach tasks ([#1569](https://github.com/databricks/cli/pull/1569)). * Change SetVariables mutator to mutate dynamic configuration instead ([#1573](https://github.com/databricks/cli/pull/1573)). * Return early in bundle destroy if no deployment exists ([#1581](https://github.com/databricks/cli/pull/1581)). * Let notebook detection code use underlying metadata if available ([#1574](https://github.com/databricks/cli/pull/1574)). * Remove schema override for variable default value ([#1536](https://github.com/databricks/cli/pull/1536)). * Print diagnostics in 'bundle deploy' ([#1579](https://github.com/databricks/cli/pull/1579)). Internal: * Update actions/upload-artifact to v4 ([#1559](https://github.com/databricks/cli/pull/1559)). * Use Go 1.22 to build and test ([#1562](https://github.com/databricks/cli/pull/1562)). * Move bespoke status call to main workspace files filer ([#1570](https://github.com/databricks/cli/pull/1570)). * Add new template ([#1578](https://github.com/databricks/cli/pull/1578)). * Add regression tests for CLI error output ([#1566](https://github.com/databricks/cli/pull/1566)). Dependency updates: * Bump golang.org/x/mod from 0.18.0 to 0.19.0 ([#1576](https://github.com/databricks/cli/pull/1576)). * Bump golang.org/x/term from 0.21.0 to 0.22.0 ([#1577](https://github.com/databricks/cli/pull/1577)). 
--- CHANGELOG.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16d81f822..eb902e0b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,27 @@ # Version changelog +## 0.223.2 + +Bundles: + * Override complex variables with target overrides instead of merging ([#1567](https://github.com/databricks/cli/pull/1567)). + * Rewrite local path for libraries in foreach tasks ([#1569](https://github.com/databricks/cli/pull/1569)). + * Change SetVariables mutator to mutate dynamic configuration instead ([#1573](https://github.com/databricks/cli/pull/1573)). + * Return early in bundle destroy if no deployment exists ([#1581](https://github.com/databricks/cli/pull/1581)). + * Let notebook detection code use underlying metadata if available ([#1574](https://github.com/databricks/cli/pull/1574)). + * Remove schema override for variable default value ([#1536](https://github.com/databricks/cli/pull/1536)). + * Print diagnostics in 'bundle deploy' ([#1579](https://github.com/databricks/cli/pull/1579)). + +Internal: + * Update actions/upload-artifact to v4 ([#1559](https://github.com/databricks/cli/pull/1559)). + * Use Go 1.22 to build and test ([#1562](https://github.com/databricks/cli/pull/1562)). + * Move bespoke status call to main workspace files filer ([#1570](https://github.com/databricks/cli/pull/1570)). + * Add new template ([#1578](https://github.com/databricks/cli/pull/1578)). + * Add regression tests for CLI error output ([#1566](https://github.com/databricks/cli/pull/1566)). + +Dependency updates: + * Bump golang.org/x/mod from 0.18.0 to 0.19.0 ([#1576](https://github.com/databricks/cli/pull/1576)). + * Bump golang.org/x/term from 0.21.0 to 0.22.0 ([#1577](https://github.com/databricks/cli/pull/1577)). + ## 0.223.1 This bugfix release fixes missing error messages in v0.223.0. From 434bcbb01858b57b6d1d4b8f1f2a3201dc9bd82b Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 16 Jul 2024 10:57:04 +0200 Subject: [PATCH 283/286] Allow artifacts (JARs, wheels) to be uploaded to UC Volumes (#1591) ## Changes This change allows to specify UC volumes path as an artifact paths so all artifacts (JARs, wheels) are uploaded to UC Volumes. 
Example configuration is here: ``` bundle: name: jar-bundle workspace: host: https://foo.com artifact_path: /Volumes/main/default/foobar artifacts: my_java_code: path: ./sample-java build: "javac PrintArgs.java && jar cvfm PrintArgs.jar META-INF/MANIFEST.MF PrintArgs.class" files: - source: ./sample-java/PrintArgs.jar resources: jobs: jar_job: name: "Test Spark Jar Job" tasks: - task_key: TestSparkJarTask new_cluster: num_workers: 1 spark_version: "14.3.x-scala2.12" node_type_id: "i3.xlarge" spark_jar_task: main_class_name: PrintArgs libraries: - jar: ./sample-java/PrintArgs.jar ``` ## Tests Manually + added E2E test for Java jobs E2E test is temporarily skipped until auth related issues for UC for tests are resolved --- bundle/artifacts/artifacts.go | 24 ++++- bundle/artifacts/artifacts_test.go | 91 ++++++++++++++++++- bundle/artifacts/build.go | 64 ++++++++----- bundle/artifacts/upload.go | 19 ++-- internal/bundle/artifacts_test.go | 69 ++++++++++++++ .../databricks_template_schema.json | 29 ++++++ .../template/databricks.yml.tmpl | 28 ++++++ .../{{.project_name}}/META-INF/MANIFEST.MF | 1 + .../template/{{.project_name}}/PrintArgs.java | 8 ++ internal/bundle/helpers.go | 6 +- internal/bundle/spark_jar_test.go | 52 +++++++++++ internal/helpers.go | 4 +- 12 files changed, 357 insertions(+), 38 deletions(-) create mode 100644 internal/bundle/bundles/spark_jar_task/databricks_template_schema.json create mode 100644 internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF create mode 100644 internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java create mode 100644 internal/bundle/spark_jar_test.go diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index a5f41ae4b..15565cd60 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -8,6 +8,7 @@ import ( "os" "path" "path/filepath" + "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" @@ -17,6 +18,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" ) type mutatorFactory = func(name string) bundle.Mutator @@ -103,7 +105,7 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost return diag.FromErr(err) } - client, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath) + client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath) if err != nil { return diag.FromErr(err) } @@ -116,6 +118,17 @@ func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost return nil } +func getFilerForArtifacts(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) { + if isVolumesPath(uploadPath) { + return filer.NewFilesClient(w, uploadPath) + } + return filer.NewWorkspaceFilesClient(w, uploadPath) +} + +func isVolumesPath(path string) bool { + return strings.HasPrefix(path, "/Volumes/") +} + func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error { for i := range a.Files { f := &a.Files[i] @@ -130,14 +143,15 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u log.Infof(ctx, "Upload succeeded") f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) + remotePath := f.RemotePath - // TODO: confirm if we still need to update the 
remote path to start with /Workspace - wsfsBase := "/Workspace" - remotePath := path.Join(wsfsBase, f.RemotePath) + if !strings.HasPrefix(f.RemotePath, "/Workspace/") && !strings.HasPrefix(f.RemotePath, "/Volumes/") { + wsfsBase := "/Workspace" + remotePath = path.Join(wsfsBase, f.RemotePath) + } for _, job := range b.Config.Resources.Jobs { rewriteArtifactPath(b, f, job, remotePath) - } } diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go index 53c2798ed..6d85f3af9 100644 --- a/bundle/artifacts/artifacts_test.go +++ b/bundle/artifacts/artifacts_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestArtifactUpload(t *testing.T) { +func TestArtifactUploadForWorkspace(t *testing.T) { tmpDir := t.TempDir() whlFolder := filepath.Join(tmpDir, "whl") testutil.Touch(t, whlFolder, "source.whl") @@ -105,3 +105,92 @@ func TestArtifactUpload(t *testing.T) { require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) } + +func TestArtifactUploadForVolumes(t *testing.T) { + tmpDir := t.TempDir() + whlFolder := filepath.Join(tmpDir, "whl") + testutil.Touch(t, whlFolder, "source.whl") + whlLocalPath := filepath.Join(whlFolder, "source.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/foo/bar/artifacts", + }, + Artifacts: config.Artifacts{ + "whl": { + Type: config.ArtifactPythonWheel, + Files: []config.ArtifactFile{ + {Source: whlLocalPath}, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Volumes/some/path/mywheel.whl", + }, + }, + }, + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Volumes/some/path/mywheel.whl", + }, + }, + }, + }, + }, + }, + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + filepath.Join("whl", "source.whl"), + "/Volumes/some/path/mywheel.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + artifact := b.Config.Artifacts["whl"] + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source.whl"), + mock.AnythingOfType("*bytes.Reader"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil) + + err := uploadArtifact(context.Background(), b, artifact, "/Volumes/foo/bar/artifacts", mockFiler) + require.NoError(t, err) + + // Test that libraries path is updated + require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) + require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", 
b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) +} diff --git a/bundle/artifacts/build.go b/bundle/artifacts/build.go index 722891ada..c8c3bf67c 100644 --- a/bundle/artifacts/build.go +++ b/bundle/artifacts/build.go @@ -44,27 +44,6 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { } } - // Expand any glob reference in files source path - files := make([]config.ArtifactFile, 0, len(artifact.Files)) - for _, f := range artifact.Files { - matches, err := filepath.Glob(f.Source) - if err != nil { - return diag.Errorf("unable to find files for %s: %v", f.Source, err) - } - - if len(matches) == 0 { - return diag.Errorf("no files found for %s", f.Source) - } - - for _, match := range matches { - files = append(files, config.ArtifactFile{ - Source: match, - }) - } - } - - artifact.Files = files - // Skip building if build command is not specified or infered if artifact.BuildCommand == "" { // If no build command was specified or infered and there is no @@ -72,7 +51,11 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if len(artifact.Files) == 0 { return diag.Errorf("misconfigured artifact: please specify 'build' or 'files' property") } - return nil + + // We can skip calling build mutator if there is no build command + // But we still need to expand glob references in files source path. + diags := expandGlobReference(artifact) + return diags } // If artifact path is not provided, use bundle root dir @@ -85,5 +68,40 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact.Path = filepath.Join(dirPath, artifact.Path) } - return bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name)) + diags := bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name)) + if diags.HasError() { + return diags + } + + // We need to expand glob reference after build mutator is applied because + // if we do it before, any files that are generated by build command will + // not be included into artifact.Files and thus will not be uploaded. 
+ d := expandGlobReference(artifact) + return diags.Extend(d) +} + +func expandGlobReference(artifact *config.Artifact) diag.Diagnostics { + var diags diag.Diagnostics + + // Expand any glob reference in files source path + files := make([]config.ArtifactFile, 0, len(artifact.Files)) + for _, f := range artifact.Files { + matches, err := filepath.Glob(f.Source) + if err != nil { + return diags.Extend(diag.Errorf("unable to find files for %s: %v", f.Source, err)) + } + + if len(matches) == 0 { + return diags.Extend(diag.Errorf("no files found for %s", f.Source)) + } + + for _, match := range matches { + files = append(files, config.ArtifactFile{ + Source: match, + }) + } + } + + artifact.Files = files + return diags } diff --git a/bundle/artifacts/upload.go b/bundle/artifacts/upload.go index 5c12c9444..3af50021e 100644 --- a/bundle/artifacts/upload.go +++ b/bundle/artifacts/upload.go @@ -6,7 +6,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" - "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/log" ) func UploadAll() bundle.Mutator { @@ -57,12 +58,18 @@ func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics return diag.FromErr(err) } - b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{ - Path: uploadPath, - Recursive: true, - }) + client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath) + if err != nil { + return diag.FromErr(err) + } - err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, uploadPath) + // We intentionally ignore the error because it is not critical to the deployment + err = client.Delete(ctx, ".", filer.DeleteRecursively) + if err != nil { + log.Errorf(ctx, "failed to delete %s: %v", uploadPath, err) + } + + err = client.Mkdir(ctx, ".") if err != nil { return diag.Errorf("unable to create directory for %s: %v", uploadPath, err) } diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 222b23047..46c236a4e 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -153,3 +153,72 @@ func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) b.Config.Resources.Jobs["test"].JobSettings.Environments[0].Spec.Dependencies[0], ) } + +func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + volumePath := internal.TemporaryUcVolume(t, w) + + dir := t.TempDir() + whlPath := filepath.Join(dir, "dist", "test.whl") + touchEmptyFile(t, whlPath) + + b := &bundle.Bundle{ + RootPath: dir, + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + }, + Workspace: config.Workspace{ + ArtifactPath: volumePath, + }, + Artifacts: config.Artifacts{ + "test": &config.Artifact{ + Type: "whl", + Files: []config.ArtifactFile{ + { + Source: whlPath, + }, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "dist/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + require.NoError(t, diags.Error()) + + // The remote path attribute on the artifact file should have been set. 
+ require.Regexp(t, + regexp.MustCompile(path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`)), + b.Config.Artifacts["test"].Files[0].RemotePath, + ) + + // The task library path should have been updated to the remote path. + require.Regexp(t, + regexp.MustCompile(path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`)), + b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl, + ) +} diff --git a/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json b/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json new file mode 100644 index 000000000..078dff976 --- /dev/null +++ b/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json @@ -0,0 +1,29 @@ +{ + "properties": { + "project_name": { + "type": "string", + "default": "my_java_project", + "description": "Unique name for this project" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" + }, + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + }, + "root": { + "type": "string", + "description": "Path to the root of the template" + }, + "artifact_path": { + "type": "string", + "description": "Path to the remote base path for artifacts" + } + } +} diff --git a/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl b/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl new file mode 100644 index 000000000..24a6d7d8a --- /dev/null +++ b/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl @@ -0,0 +1,28 @@ +bundle: + name: spark-jar-task + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + artifact_path: {{.artifact_path}} + +artifacts: + my_java_code: + path: ./{{.project_name}} + build: "javac PrintArgs.java && jar cvfm PrintArgs.jar META-INF/MANIFEST.MF PrintArgs.class" + files: + - source: ./{{.project_name}}/PrintArgs.jar + +resources: + jobs: + jar_job: + name: "[${bundle.target}] Test Spark Jar Job {{.unique_id}}" + tasks: + - task_key: TestSparkJarTask + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + spark_jar_task: + main_class_name: PrintArgs + libraries: + - jar: ./{{.project_name}}/PrintArgs.jar diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF b/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF new file mode 100644 index 000000000..40b023dbd --- /dev/null +++ b/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF @@ -0,0 +1 @@ +Main-Class: PrintArgs \ No newline at end of file diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java b/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java new file mode 100644 index 000000000..b7430f25f --- /dev/null +++ b/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java @@ -0,0 +1,8 @@ +import java.util.Arrays; + +public class PrintArgs { + public static void main(String[] args) { + System.out.println("Hello from Jar!"); + System.out.println(Arrays.toString(args)); + } +} diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index a17964b16..c33c15331 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -21,9 +21,13 @@ import ( const defaultSparkVersion = "13.3.x-snapshot-scala2.12" func 
initTestTemplate(t *testing.T, ctx context.Context, templateName string, config map[string]any) (string, error) { + bundleRoot := t.TempDir() + return initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot) +} + +func initTestTemplateWithBundleRoot(t *testing.T, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) { templateRoot := filepath.Join("bundles", templateName) - bundleRoot := t.TempDir() configFilePath, err := writeConfigFile(t, config) if err != nil { return "", err diff --git a/internal/bundle/spark_jar_test.go b/internal/bundle/spark_jar_test.go new file mode 100644 index 000000000..c981e7750 --- /dev/null +++ b/internal/bundle/spark_jar_test.go @@ -0,0 +1,52 @@ +package bundle + +import ( + "os" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func runSparkJarTest(t *testing.T, sparkVersion string) { + t.Skip("Temporarily skipping the test until auth / permission issues for UC volumes are resolved.") + + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + volumePath := internal.TemporaryUcVolume(t, w) + + nodeTypeId := internal.GetNodeTypeId(env) + tmpDir := t.TempDir() + bundleRoot, err := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ + "node_type_id": nodeTypeId, + "unique_id": uuid.New().String(), + "spark_version": sparkVersion, + "root": tmpDir, + "artifact_path": volumePath, + }, tmpDir) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + t.Cleanup(func() { + destroyBundle(t, ctx, bundleRoot) + }) + + out, err := runResource(t, ctx, bundleRoot, "jar_job") + require.NoError(t, err) + require.Contains(t, out, "Hello from Jar!") +} + +func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { + runSparkJarTest(t, "14.3.x-scala2.12") +} diff --git a/internal/helpers.go b/internal/helpers.go index 67a258ba4..972a2322b 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -472,7 +472,7 @@ func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { } // Create a new UC volume in a catalog called "main" in the workspace. -func temporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { +func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { ctx := context.Background() // Create a schema @@ -607,7 +607,7 @@ func setupUcVolumesFiler(t *testing.T) (filer.Filer, string) { w, err := databricks.NewWorkspaceClient() require.NoError(t, err) - tmpDir := temporaryUcVolume(t, w) + tmpDir := TemporaryUcVolume(t, w) f, err := filer.NewFilesClient(w, tmpDir) require.NoError(t, err) From 39c2633773bd6a90cfe85216db34e2e0adbeca6b Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 16 Jul 2024 15:31:58 +0530 Subject: [PATCH 284/286] Add UUID to uniquely identify a deployment state (#1595) ## Changes We need a mechanism to invalidate the locally cached deployment state if a user uses the same working directory to deploy to multiple distinct deployments (separate targets, root_paths or even hosts). This PR just adds the UUID to the deployment state in preparation for invalidating this cache. 
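To make the mechanism concrete, here is a minimal sketch of the idempotent ID assignment (the `ensureID` helper and the reduced `DeploymentState` struct are illustrative only; the actual change lives in `bundle/deploy/state_update.go` in the diff below):

```go
// Sketch only: mirrors the idempotent ID assignment this patch adds.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// DeploymentState is reduced here to the single field relevant to this change.
type DeploymentState struct {
	ID uuid.UUID `json:"id"`
}

// ensureID assigns a UUID the first time the state is written and leaves an
// existing ID untouched on subsequent deployments from the same directory.
func ensureID(state *DeploymentState) {
	if state.ID == uuid.Nil {
		state.ID = uuid.New()
	}
}

func main() {
	var state DeploymentState
	ensureID(&state)
	first := state.ID
	ensureID(&state)
	fmt.Println(first == state.ID) // true: the ID stays stable across updates
}
```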
The actual invalidation will follow up at a later date (tracked in internal backlog). ## Tests Unit test. Manually checked the deployment state is actually being written. --- bundle/deploy/state.go | 4 ++++ bundle/deploy/state_update.go | 6 ++++++ bundle/deploy/state_update_test.go | 8 ++++++++ 3 files changed, 18 insertions(+) diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go index 97048811b..4f2bc4ee4 100644 --- a/bundle/deploy/state.go +++ b/bundle/deploy/state.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/vfs" + "github.com/google/uuid" ) const DeploymentStateFileName = "deployment.json" @@ -46,6 +47,9 @@ type DeploymentState struct { // Files is a list of files which has been deployed as part of this deployment. Files Filelist `json:"files"` + + // UUID uniquely identifying the deployment. + ID uuid.UUID `json:"id"` } // We use this entry type as a proxy to fs.DirEntry. diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index bfdb308c4..9ab1bacf1 100644 --- a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -14,6 +14,7 @@ import ( "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" + "github.com/google/uuid" ) type stateUpdate struct { @@ -46,6 +47,11 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost } state.Files = fl + // Generate a UUID for the deployment, if one does not already exist + if state.ID == uuid.Nil { + state.ID = uuid.New() + } + statePath, err := getPathToStateFile(ctx, b) if err != nil { return diag.FromErr(err) diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index ed72439d2..2982546d5 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -13,6 +13,7 @@ import ( "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/google/uuid" "github.com/stretchr/testify/require" ) @@ -88,6 +89,9 @@ func TestStateUpdate(t *testing.T) { }, }) require.Equal(t, build.GetInfo().Version, state.CliVersion) + + // Valid non-empty UUID is generated. + require.NotEqual(t, uuid.Nil, state.ID) } func TestStateUpdateWithExistingState(t *testing.T) { @@ -109,6 +113,7 @@ func TestStateUpdateWithExistingState(t *testing.T) { LocalPath: "bar/t1.py", }, }, + ID: uuid.MustParse("123e4567-e89b-12d3-a456-426614174000"), } data, err := json.Marshal(state) @@ -135,4 +140,7 @@ func TestStateUpdateWithExistingState(t *testing.T) { }, }) require.Equal(t, build.GetInfo().Version, state.CliVersion) + + // Existing UUID is not overwritten. + require.Equal(t, uuid.MustParse("123e4567-e89b-12d3-a456-426614174000"), state.ID) } From 8ed996448206e5870a1d026331a88fd6392c3ede Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 16 Jul 2024 16:57:27 +0530 Subject: [PATCH 285/286] Track multiple locations associated with a `dyn.Value` (#1510) ## Changes This PR changes the location metadata associated with a `dyn.Value` to a slice of locations. This will allow us to keep track of location metadata across merges and overrides. The convention is to treat the first location in the slice as the primary location. Also, the semantics are the same as before if there's only one location associated with a value, that is: 1. 
For complex values (maps, sequences) the location of the v1 is primary in Merge(v1, v2) 2. For primitive values the location of v2 is primary in Merge(v1, v2) ## Tests Modifying existing merge unit tests. Other existing unit tests and integration tests pass. --------- Co-authored-by: Pieter Noordhuis --- bundle/config/generate/job.go | 2 +- .../mutator/expand_pipeline_glob_paths.go | 4 +- .../mutator/python/python_mutator_test.go | 20 +- bundle/config/mutator/rewrite_sync_paths.go | 2 +- bundle/config/mutator/translate_paths.go | 2 +- bundle/config/root.go | 10 +- bundle/internal/bundletest/location.go | 4 +- libs/dyn/convert/from_typed.go | 4 +- libs/dyn/convert/from_typed_test.go | 60 ++--- libs/dyn/convert/normalize.go | 16 +- libs/dyn/convert/normalize_test.go | 54 ++-- libs/dyn/dynvar/resolve.go | 4 +- libs/dyn/merge/elements_by_key.go | 2 +- libs/dyn/merge/merge.go | 35 ++- libs/dyn/merge/merge_test.go | 138 +++++++++-- libs/dyn/merge/override.go | 4 +- libs/dyn/merge/override_test.go | 233 +++++++++--------- libs/dyn/pattern.go | 4 +- libs/dyn/value.go | 48 +++- libs/dyn/value_test.go | 11 +- libs/dyn/value_underlying_test.go | 4 +- libs/dyn/visit_map.go | 4 +- libs/dyn/yamlloader/loader.go | 22 +- libs/dyn/yamlsaver/saver_test.go | 65 ++--- libs/dyn/yamlsaver/utils.go | 2 +- libs/dyn/yamlsaver/utils_test.go | 31 ++- 26 files changed, 472 insertions(+), 313 deletions(-) diff --git a/bundle/config/generate/job.go b/bundle/config/generate/job.go index 3ab5e0122..28bc86412 100644 --- a/bundle/config/generate/job.go +++ b/bundle/config/generate/job.go @@ -22,7 +22,7 @@ func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) { tasks = append(tasks, v) } // We're using location lines to define the order of keys in exported YAML. - value["tasks"] = dyn.NewValue(tasks, dyn.Location{Line: jobOrder.Get("tasks")}) + value["tasks"] = dyn.NewValue(tasks, []dyn.Location{{Line: jobOrder.Get("tasks")}}) } return yamlsaver.ConvertToMapValue(job.Settings, jobOrder, []string{"format", "new_cluster", "existing_cluster_id"}, value) diff --git a/bundle/config/mutator/expand_pipeline_glob_paths.go b/bundle/config/mutator/expand_pipeline_glob_paths.go index 268d8fa48..5703332fa 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths.go @@ -59,7 +59,7 @@ func (m *expandPipelineGlobPaths) expandLibrary(v dyn.Value) ([]dyn.Value, error if err != nil { return nil, err } - nv, err := dyn.SetByPath(v, p, dyn.NewValue(m, pv.Location())) + nv, err := dyn.SetByPath(v, p, dyn.NewValue(m, pv.Locations())) if err != nil { return nil, err } @@ -90,7 +90,7 @@ func (m *expandPipelineGlobPaths) expandSequence(p dyn.Path, v dyn.Value) (dyn.V vs = append(vs, v...) 
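For reference, a minimal sketch of the merge semantics described in the commit message above, assuming the `libs/dyn` and `libs/dyn/merge` import paths used elsewhere in this patch (the file names in the locations are made up for illustration):

```go
// Sketch: location handling when merging two configuration values.
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/merge"
)

func main() {
	l1 := dyn.Location{File: "base.yml", Line: 1}
	l2 := dyn.Location{File: "override.yml", Line: 1}

	v1 := dyn.NewValue(map[string]dyn.Value{
		"foo": dyn.NewValue("bar", []dyn.Location{l1}),
	}, []dyn.Location{l1})
	v2 := dyn.NewValue(map[string]dyn.Value{
		"foo": dyn.NewValue("qux", []dyn.Location{l2}),
	}, []dyn.Location{l2})

	out, err := merge.Merge(v1, v2)
	if err != nil {
		panic(err)
	}

	// For the map itself, v1's location is primary and v2's is accumulated.
	fmt.Println(out.Location())  // points at base.yml
	fmt.Println(out.Locations()) // base.yml first, then override.yml

	// The overridden entry takes v2's value, so v2's location is primary there.
	fmt.Println(out.Get("foo").Location()) // points at override.yml
}
```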
} - return dyn.NewValue(vs, v.Location()), nil + return dyn.NewValue(vs, v.Locations()), nil } func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index 9a0ed8c3a..588589831 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -305,8 +305,8 @@ type createOverrideVisitorTestCase struct { } func TestCreateOverrideVisitor(t *testing.T) { - left := dyn.NewValue(42, dyn.Location{}) - right := dyn.NewValue(1337, dyn.Location{}) + left := dyn.V(42) + right := dyn.V(1337) testCases := []createOverrideVisitorTestCase{ { @@ -470,21 +470,21 @@ func TestCreateOverrideVisitor_omitempty(t *testing.T) { // this is not happening, but adding for completeness name: "undo delete of empty variables", path: dyn.MustPathFromString("variables"), - left: dyn.NewValue([]dyn.Value{}, location), + left: dyn.NewValue([]dyn.Value{}, []dyn.Location{location}), expectedErr: merge.ErrOverrideUndoDelete, phases: allPhases, }, { name: "undo delete of empty job clusters", path: dyn.MustPathFromString("resources.jobs.job0.job_clusters"), - left: dyn.NewValue([]dyn.Value{}, location), + left: dyn.NewValue([]dyn.Value{}, []dyn.Location{location}), expectedErr: merge.ErrOverrideUndoDelete, phases: allPhases, }, { name: "allow delete of non-empty job clusters", path: dyn.MustPathFromString("resources.jobs.job0.job_clusters"), - left: dyn.NewValue([]dyn.Value{dyn.NewValue("abc", location)}, location), + left: dyn.NewValue([]dyn.Value{dyn.NewValue("abc", []dyn.Location{location})}, []dyn.Location{location}), expectedErr: nil, // deletions aren't allowed in 'load' phase phases: []phase{PythonMutatorPhaseInit}, @@ -492,17 +492,15 @@ func TestCreateOverrideVisitor_omitempty(t *testing.T) { { name: "undo delete of empty tags", path: dyn.MustPathFromString("resources.jobs.job0.tags"), - left: dyn.NewValue(map[string]dyn.Value{}, location), + left: dyn.NewValue(map[string]dyn.Value{}, []dyn.Location{location}), expectedErr: merge.ErrOverrideUndoDelete, phases: allPhases, }, { name: "allow delete of non-empty tags", path: dyn.MustPathFromString("resources.jobs.job0.tags"), - left: dyn.NewValue( - map[string]dyn.Value{"dev": dyn.NewValue("true", location)}, - location, - ), + left: dyn.NewValue(map[string]dyn.Value{"dev": dyn.NewValue("true", []dyn.Location{location})}, []dyn.Location{location}), + expectedErr: nil, // deletions aren't allowed in 'load' phase phases: []phase{PythonMutatorPhaseInit}, @@ -510,7 +508,7 @@ func TestCreateOverrideVisitor_omitempty(t *testing.T) { { name: "undo delete of nil", path: dyn.MustPathFromString("resources.jobs.job0.tags"), - left: dyn.NilValue.WithLocation(location), + left: dyn.NilValue.WithLocations([]dyn.Location{location}), expectedErr: merge.ErrOverrideUndoDelete, phases: allPhases, }, diff --git a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go index 85db79797..cfdc55f36 100644 --- a/bundle/config/mutator/rewrite_sync_paths.go +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -38,7 +38,7 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { return dyn.InvalidValue, err } - return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Location()), nil + return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Locations()), nil } } diff --git a/bundle/config/mutator/translate_paths.go 
b/bundle/config/mutator/translate_paths.go index a01d3d6a7..28f7d3d30 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -182,7 +182,7 @@ func (t *translateContext) rewriteValue(p dyn.Path, v dyn.Value, fn rewriteFunc, return dyn.InvalidValue, err } - return dyn.NewValue(out, v.Location()), nil + return dyn.NewValue(out, v.Locations()), nil } func (t *translateContext) rewriteRelativeTo(p dyn.Path, v dyn.Value, fn rewriteFunc, dir, fallback string) (dyn.Value, error) { diff --git a/bundle/config/root.go b/bundle/config/root.go index 2bbb78696..594a9105f 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -378,7 +378,7 @@ func (r *Root) MergeTargetOverrides(name string) error { // Below, we're setting fields on the bundle key, so make sure it exists. if root.Get("bundle").Kind() == dyn.KindInvalid { - root, err = dyn.Set(root, "bundle", dyn.NewValue(map[string]dyn.Value{}, dyn.Location{})) + root, err = dyn.Set(root, "bundle", dyn.V(map[string]dyn.Value{})) if err != nil { return err } @@ -404,7 +404,7 @@ func (r *Root) MergeTargetOverrides(name string) error { if v := target.Get("git"); v.Kind() != dyn.KindInvalid { ref, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git"))) if err != nil { - ref = dyn.NewValue(map[string]dyn.Value{}, dyn.Location{}) + ref = dyn.V(map[string]dyn.Value{}) } // Merge the override into the reference. @@ -415,7 +415,7 @@ func (r *Root) MergeTargetOverrides(name string) error { // If the branch was overridden, we need to clear the inferred flag. if branch := v.Get("branch"); branch.Kind() != dyn.KindInvalid { - out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.NewValue(false, dyn.Location{})) + out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.V(false)) if err != nil { return err } @@ -456,7 +456,7 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { // configuration will convert this to a string if necessary. return dyn.NewValue(map[string]dyn.Value{ "default": variable, - }, variable.Location()), nil + }, variable.Locations()), nil case dyn.KindMap, dyn.KindSequence: // Check if the original definition of variable has a type field. @@ -469,7 +469,7 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { return dyn.NewValue(map[string]dyn.Value{ "type": typeV, "default": variable, - }, variable.Location()), nil + }, variable.Locations()), nil } return variable, nil diff --git a/bundle/internal/bundletest/location.go b/bundle/internal/bundletest/location.go index 1fd6f968c..ebec43d30 100644 --- a/bundle/internal/bundletest/location.go +++ b/bundle/internal/bundletest/location.go @@ -14,9 +14,9 @@ func SetLocation(b *bundle.Bundle, prefix string, filePath string) { return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { // If the path has the given prefix, set the location. if p.HasPrefix(start) { - return v.WithLocation(dyn.Location{ + return v.WithLocations([]dyn.Location{{ File: filePath, - }), nil + }}), nil } // The path is not nested under the given prefix. 
diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index e8d321f66..cd92ad0eb 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -42,7 +42,7 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, // Dereference pointer if necessary for srcv.Kind() == reflect.Pointer { if srcv.IsNil() { - return dyn.NilValue.WithLocation(ref.Location()), nil + return dyn.NilValue.WithLocations(ref.Locations()), nil } srcv = srcv.Elem() @@ -83,7 +83,7 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, if err != nil { return dyn.InvalidValue, err } - return v.WithLocation(ref.Location()), err + return v.WithLocations(ref.Locations()), err } func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index 9141a6948..0cddff3be 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -115,16 +115,16 @@ func TestFromTypedStructSetFieldsRetainLocation(t *testing.T) { } ref := dyn.V(map[string]dyn.Value{ - "foo": dyn.NewValue("bar", dyn.Location{File: "foo"}), - "bar": dyn.NewValue("baz", dyn.Location{File: "bar"}), + "foo": dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), + "bar": dyn.NewValue("baz", []dyn.Location{{File: "bar"}}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) // Assert foo and bar have retained their location. - assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), nv.Get("foo")) + assert.Equal(t, dyn.NewValue("qux", []dyn.Location{{File: "bar"}}), nv.Get("bar")) } func TestFromTypedStringMapWithZeroValue(t *testing.T) { @@ -359,16 +359,16 @@ func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { } ref := dyn.V(map[string]dyn.Value{ - "foo": dyn.NewValue("bar", dyn.Location{File: "foo"}), - "bar": dyn.NewValue("baz", dyn.Location{File: "bar"}), + "foo": dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), + "bar": dyn.NewValue("baz", []dyn.Location{{File: "bar"}}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) // Assert foo and bar have retained their locations. - assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), nv.Get("foo")) + assert.Equal(t, dyn.NewValue("qux", []dyn.Location{{File: "bar"}}), nv.Get("bar")) } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { @@ -432,16 +432,16 @@ func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { } ref := dyn.V([]dyn.Value{ - dyn.NewValue("foo", dyn.Location{File: "foo"}), - dyn.NewValue("bar", dyn.Location{File: "bar"}), + dyn.NewValue("foo", []dyn.Location{{File: "foo"}}), + dyn.NewValue("bar", []dyn.Location{{File: "bar"}}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) // Assert foo and bar have retained their locations. 
- assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv.Index(0)) - assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "bar"}), nv.Index(1)) + assert.Equal(t, dyn.NewValue("foo", []dyn.Location{{File: "foo"}}), nv.Index(0)) + assert.Equal(t, dyn.NewValue("bar", []dyn.Location{{File: "bar"}}), nv.Index(1)) } func TestFromTypedStringEmpty(t *testing.T) { @@ -477,19 +477,19 @@ func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { } func TestFromTypedStringRetainsLocations(t *testing.T) { - var ref = dyn.NewValue("foo", dyn.Location{File: "foo"}) + var ref = dyn.NewValue("foo", []dyn.Location{{File: "foo"}}) // case: value has not been changed var src string = "foo" nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue("foo", []dyn.Location{{File: "foo"}}), nv) // case: value has been changed src = "bar" nv, err = FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), nv) } func TestFromTypedStringTypeError(t *testing.T) { @@ -532,19 +532,19 @@ func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { } func TestFromTypedBoolRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(true, dyn.Location{File: "foo"}) + var ref = dyn.NewValue(true, []dyn.Location{{File: "foo"}}) // case: value has not been changed var src bool = true nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(true, dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(true, []dyn.Location{{File: "foo"}}), nv) // case: value has been changed src = false nv, err = FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(false, dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(false, []dyn.Location{{File: "foo"}}), nv) } func TestFromTypedBoolVariableReference(t *testing.T) { @@ -595,19 +595,19 @@ func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { } func TestFromTypedIntRetainsLocations(t *testing.T) { - var ref = dyn.NewValue(1234, dyn.Location{File: "foo"}) + var ref = dyn.NewValue(1234, []dyn.Location{{File: "foo"}}) // case: value has not been changed var src int = 1234 nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(1234, dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(1234, []dyn.Location{{File: "foo"}}), nv) // case: value has been changed src = 1235 nv, err = FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(int64(1235), dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(int64(1235), []dyn.Location{{File: "foo"}}), nv) } func TestFromTypedIntVariableReference(t *testing.T) { @@ -659,19 +659,19 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { func TestFromTypedFloatRetainsLocations(t *testing.T) { var src float64 - var ref = dyn.NewValue(1.23, dyn.Location{File: "foo"}) + var ref = dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}) // case: value has not been changed src = 1.23 nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(1.23, dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}), nv) // case: value has been changed src = 1.24 nv, err = FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(1.24, dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(1.24, []dyn.Location{{File: 
"foo"}}), nv) } func TestFromTypedFloatVariableReference(t *testing.T) { @@ -740,27 +740,27 @@ func TestFromTypedNilPointerRetainsLocations(t *testing.T) { } var src *Tmp - ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + ref := dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) + assert.Equal(t, dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}), nv) } func TestFromTypedNilMapRetainsLocation(t *testing.T) { var src map[string]string - ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + ref := dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) + assert.Equal(t, dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}), nv) } func TestFromTypedNilSliceRetainsLocation(t *testing.T) { var src []string - ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + ref := dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) + assert.Equal(t, dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}), nv) } diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index ad82e20ef..246c97eaf 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -120,7 +120,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen // Return the normalized value if missing fields are not included. if !n.includeMissingFields { - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags } // Populate missing fields with their zero values. 
@@ -165,7 +165,7 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen } } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags case dyn.KindNil: return src, diags @@ -203,7 +203,7 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r out.Set(pk, nv) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags case dyn.KindNil: return src, diags @@ -238,7 +238,7 @@ func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen [ out = append(out, v) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags case dyn.KindNil: return src, diags @@ -273,7 +273,7 @@ func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value, path return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindString, src, path)) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags } func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { @@ -306,7 +306,7 @@ func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value, path dy return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src, path)) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags } func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { @@ -349,7 +349,7 @@ func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value, path dyn return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindInt, src, path)) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags } func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { @@ -392,7 +392,7 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path d return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindFloat, src, path)) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags } func (n normalizeOptions) normalizeInterface(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 299ffcabd..452ed4eb1 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -229,7 +229,7 @@ func TestNormalizeStructVariableReference(t *testing.T) { } var typ Tmp - vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue("${var.foo}", []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(typ, vin) assert.Empty(t, err) assert.Equal(t, vin, vout) @@ -241,7 +241,7 @@ func TestNormalizeStructRandomStringError(t *testing.T) { } var typ Tmp - vin := dyn.NewValue("var foo", dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue("var foo", []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -258,7 +258,7 @@ func TestNormalizeStructIntError(t *testing.T) { } var typ Tmp - vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(typ, vin) assert.Len(t, err, 1) 
assert.Equal(t, diag.Diagnostic{ @@ -360,7 +360,7 @@ func TestNormalizeMapNestedError(t *testing.T) { func TestNormalizeMapVariableReference(t *testing.T) { var typ map[string]string - vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue("${var.foo}", []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(typ, vin) assert.Empty(t, err) assert.Equal(t, vin, vout) @@ -368,7 +368,7 @@ func TestNormalizeMapVariableReference(t *testing.T) { func TestNormalizeMapRandomStringError(t *testing.T) { var typ map[string]string - vin := dyn.NewValue("var foo", dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue("var foo", []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -381,7 +381,7 @@ func TestNormalizeMapRandomStringError(t *testing.T) { func TestNormalizeMapIntError(t *testing.T) { var typ map[string]string - vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -482,7 +482,7 @@ func TestNormalizeSliceNestedError(t *testing.T) { func TestNormalizeSliceVariableReference(t *testing.T) { var typ []string - vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue("${var.foo}", []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(typ, vin) assert.Empty(t, err) assert.Equal(t, vin, vout) @@ -490,7 +490,7 @@ func TestNormalizeSliceVariableReference(t *testing.T) { func TestNormalizeSliceRandomStringError(t *testing.T) { var typ []string - vin := dyn.NewValue("var foo", dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue("var foo", []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -503,7 +503,7 @@ func TestNormalizeSliceRandomStringError(t *testing.T) { func TestNormalizeSliceIntError(t *testing.T) { var typ []string - vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -524,7 +524,7 @@ func TestNormalizeString(t *testing.T) { func TestNormalizeStringNil(t *testing.T) { var typ string - vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -537,26 +537,26 @@ func TestNormalizeStringNil(t *testing.T) { func TestNormalizeStringFromBool(t *testing.T) { var typ string - vin := dyn.NewValue(true, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(true, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, dyn.NewValue("true", vin.Location()), vout) + assert.Equal(t, dyn.NewValue("true", vin.Locations()), vout) } func TestNormalizeStringFromInt(t *testing.T) { var typ string - vin := dyn.NewValue(123, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(123, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, dyn.NewValue("123", 
vin.Location()), vout) + assert.Equal(t, dyn.NewValue("123", vin.Locations()), vout) } func TestNormalizeStringFromFloat(t *testing.T) { var typ string - vin := dyn.NewValue(1.20, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(1.20, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, dyn.NewValue("1.2", vin.Location()), vout) + assert.Equal(t, dyn.NewValue("1.2", vin.Locations()), vout) } func TestNormalizeStringError(t *testing.T) { @@ -582,7 +582,7 @@ func TestNormalizeBool(t *testing.T) { func TestNormalizeBoolNil(t *testing.T) { var typ bool - vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -658,7 +658,7 @@ func TestNormalizeInt(t *testing.T) { func TestNormalizeIntNil(t *testing.T) { var typ int - vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -742,7 +742,7 @@ func TestNormalizeFloat(t *testing.T) { func TestNormalizeFloatNil(t *testing.T) { var typ float64 - vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ @@ -842,26 +842,26 @@ func TestNormalizeAnchors(t *testing.T) { func TestNormalizeBoolToAny(t *testing.T) { var typ any - vin := dyn.NewValue(false, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(false, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Len(t, err, 0) - assert.Equal(t, dyn.NewValue(false, dyn.Location{File: "file", Line: 1, Column: 1}), vout) + assert.Equal(t, dyn.NewValue(false, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) } func TestNormalizeIntToAny(t *testing.T) { var typ any - vin := dyn.NewValue(10, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(10, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Len(t, err, 0) - assert.Equal(t, dyn.NewValue(10, dyn.Location{File: "file", Line: 1, Column: 1}), vout) + assert.Equal(t, dyn.NewValue(10, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) } func TestNormalizeSliceToAny(t *testing.T) { var typ any - v1 := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) - v2 := dyn.NewValue(2, dyn.Location{File: "file", Line: 1, Column: 1}) - vin := dyn.NewValue([]dyn.Value{v1, v2}, dyn.Location{File: "file", Line: 1, Column: 1}) + v1 := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + v2 := dyn.NewValue(2, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vin := dyn.NewValue([]dyn.Value{v1, v2}, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Len(t, err, 0) - assert.Equal(t, dyn.NewValue([]dyn.Value{v1, v2}, dyn.Location{File: "file", Line: 1, Column: 1}), vout) + assert.Equal(t, dyn.NewValue([]dyn.Value{v1, v2}, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) } diff --git a/libs/dyn/dynvar/resolve.go b/libs/dyn/dynvar/resolve.go index d2494bc21..111da25c8 100644 --- a/libs/dyn/dynvar/resolve.go +++ 
b/libs/dyn/dynvar/resolve.go @@ -155,7 +155,7 @@ func (r *resolver) resolveRef(ref ref, seen []string) (dyn.Value, error) { // of where it is used. This also means that relative path resolution is done // relative to where a variable is used, not where it is defined. // - return dyn.NewValue(resolved[0].Value(), ref.value.Location()), nil + return dyn.NewValue(resolved[0].Value(), ref.value.Locations()), nil } // Not pure; perform string interpolation. @@ -178,7 +178,7 @@ func (r *resolver) resolveRef(ref ref, seen []string) (dyn.Value, error) { ref.str = strings.Replace(ref.str, ref.matches[j][0], s, 1) } - return dyn.NewValue(ref.str, ref.value.Location()), nil + return dyn.NewValue(ref.str, ref.value.Locations()), nil } func (r *resolver) resolveKey(key string, seen []string) (dyn.Value, error) { diff --git a/libs/dyn/merge/elements_by_key.go b/libs/dyn/merge/elements_by_key.go index da20ee849..e6e640d14 100644 --- a/libs/dyn/merge/elements_by_key.go +++ b/libs/dyn/merge/elements_by_key.go @@ -52,7 +52,7 @@ func (e elementsByKey) Map(_ dyn.Path, v dyn.Value) (dyn.Value, error) { out = append(out, nv) } - return dyn.NewValue(out, v.Location()), nil + return dyn.NewValue(out, v.Locations()), nil } // ElementsByKey returns a [dyn.MapFunc] that operates on a sequence diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index ffe000da3..29decd779 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -12,6 +12,26 @@ import ( // * Merging x with nil or nil with x always yields x. // * Merging maps a and b means entries from map b take precedence. // * Merging sequences a and b means concatenating them. +// +// Merging retains and accumulates the locations metadata associated with the values. +// This allows users of the module to track the provenance of values across merging of +// configuration trees, which is useful for reporting errors and warnings. +// +// Semantics for location metadata in the merged value are similar to the semantics +// for the values themselves: +// +// - When merging x with nil or nil with x, the location of x is retained. +// +// - When merging maps or sequences, the combined value retains the location of a and +// accumulates the location of b. The individual elements of the map or sequence retain +// their original locations, i.e., whether they were originally defined in a or b. +// +// The rationale for retaining location of a is that we would like to return +// the first location a bit of configuration showed up when reporting errors and warnings. +// +// - Merging primitive values means using the incoming value `b`. The location of the +// incoming value is retained and the location of the existing value `a` is accumulated. +// This is because the incoming value overwrites the existing value. func Merge(a, b dyn.Value) (dyn.Value, error) { return merge(a, b) } @@ -22,12 +42,12 @@ func merge(a, b dyn.Value) (dyn.Value, error) { // If a is nil, return b. if ak == dyn.KindNil { - return b, nil + return b.AppendLocationsFromValue(a), nil } // If b is nil, return a. if bk == dyn.KindNil { - return a, nil + return a.AppendLocationsFromValue(b), nil } // Call the appropriate merge function based on the kind of a and b. @@ -75,8 +95,8 @@ func mergeMap(a, b dyn.Value) (dyn.Value, error) { } } - // Preserve the location of the first value. - return dyn.NewValue(out, a.Location()), nil + // Preserve the location of the first value. Accumulate the locations of the second value. 
+ return dyn.NewValue(out, a.Locations()).AppendLocationsFromValue(b), nil } func mergeSequence(a, b dyn.Value) (dyn.Value, error) { @@ -88,11 +108,10 @@ func mergeSequence(a, b dyn.Value) (dyn.Value, error) { copy(out[:], as) copy(out[len(as):], bs) - // Preserve the location of the first value. - return dyn.NewValue(out, a.Location()), nil + // Preserve the location of the first value. Accumulate the locations of the second value. + return dyn.NewValue(out, a.Locations()).AppendLocationsFromValue(b), nil } - func mergePrimitive(a, b dyn.Value) (dyn.Value, error) { // Merging primitive values means using the incoming value. - return b, nil + return b.AppendLocationsFromValue(a), nil } diff --git a/libs/dyn/merge/merge_test.go b/libs/dyn/merge/merge_test.go index 3706dbd77..4a4bf9e6c 100644 --- a/libs/dyn/merge/merge_test.go +++ b/libs/dyn/merge/merge_test.go @@ -8,15 +8,17 @@ import ( ) func TestMergeMaps(t *testing.T) { - v1 := dyn.V(map[string]dyn.Value{ - "foo": dyn.V("bar"), - "bar": dyn.V("baz"), - }) + l1 := dyn.Location{File: "file1", Line: 1, Column: 2} + v1 := dyn.NewValue(map[string]dyn.Value{ + "foo": dyn.NewValue("bar", []dyn.Location{l1}), + "bar": dyn.NewValue("baz", []dyn.Location{l1}), + }, []dyn.Location{l1}) - v2 := dyn.V(map[string]dyn.Value{ - "bar": dyn.V("qux"), - "qux": dyn.V("foo"), - }) + l2 := dyn.Location{File: "file2", Line: 3, Column: 4} + v2 := dyn.NewValue(map[string]dyn.Value{ + "bar": dyn.NewValue("qux", []dyn.Location{l2}), + "qux": dyn.NewValue("foo", []dyn.Location{l2}), + }, []dyn.Location{l2}) // Merge v2 into v1. { @@ -27,6 +29,23 @@ func TestMergeMaps(t *testing.T) { "bar": "qux", "qux": "foo", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l1, l2}, out.Locations()) + assert.Equal(t, []dyn.Location{l2, l1}, out.Get("bar").Locations()) + assert.Equal(t, []dyn.Location{l1}, out.Get("foo").Locations()) + assert.Equal(t, []dyn.Location{l2}, out.Get("qux").Locations()) + + // Location of the merged value should be the location of v1. + assert.Equal(t, l1, out.Location()) + + // Value of bar is "qux" which comes from v2. This .Location() should + // return the location of v2. + assert.Equal(t, l2, out.Get("bar").Location()) + + // Original locations of keys that were not overwritten should be preserved. + assert.Equal(t, l1, out.Get("foo").Location()) + assert.Equal(t, l2, out.Get("qux").Location()) } // Merge v1 into v2. @@ -38,30 +57,64 @@ func TestMergeMaps(t *testing.T) { "bar": "baz", "qux": "foo", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l2, l1}, out.Locations()) + assert.Equal(t, []dyn.Location{l1, l2}, out.Get("bar").Locations()) + assert.Equal(t, []dyn.Location{l1}, out.Get("foo").Locations()) + assert.Equal(t, []dyn.Location{l2}, out.Get("qux").Locations()) + + // Location of the merged value should be the location of v2. + assert.Equal(t, l2, out.Location()) + + // Value of bar is "baz" which comes from v1. This .Location() should + // return the location of v1. + assert.Equal(t, l1, out.Get("bar").Location()) + + // Original locations of keys that were not overwritten should be preserved. 
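The doc comment added to `merge.go` above notes that the accumulated locations exist to support error and warning reporting. A hypothetical sketch of how a consumer might surface that provenance (the `provenance` helper below is illustrative and not part of this change):

```go
// Sketch: rendering every location a value was defined at, primary first.
package main

import (
	"fmt"
	"strings"

	"github.com/databricks/cli/libs/dyn"
)

// provenance formats all locations of a value, with the primary location
// (the one returned by v.Location()) listed first.
func provenance(v dyn.Value) string {
	locs := v.Locations()
	if len(locs) == 0 {
		return "unknown location"
	}
	parts := make([]string, 0, len(locs))
	for _, l := range locs {
		parts = append(parts, fmt.Sprintf("%s:%d:%d", l.File, l.Line, l.Column))
	}
	return strings.Join(parts, ", ")
}

func main() {
	v := dyn.NewValue("x", []dyn.Location{
		{File: "databricks.yml", Line: 4, Column: 7},
		{File: "override.yml", Line: 2, Column: 3},
	})
	fmt.Println(v.Location().File) // databricks.yml: the primary location
	fmt.Println(provenance(v))     // both definitions, primary first
}
```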
+ assert.Equal(t, l1, out.Get("foo").Location()) + assert.Equal(t, l2, out.Get("qux").Location()) } + } func TestMergeMapsNil(t *testing.T) { - v := dyn.V(map[string]dyn.Value{ + l := dyn.Location{File: "file", Line: 1, Column: 2} + v := dyn.NewValue(map[string]dyn.Value{ "foo": dyn.V("bar"), - }) + }, []dyn.Location{l}) + + nilL := dyn.Location{File: "file", Line: 3, Column: 4} + nilV := dyn.NewValue(nil, []dyn.Location{nilL}) // Merge nil into v. { - out, err := Merge(v, dyn.NilValue) + out, err := Merge(v, nilV) assert.NoError(t, err) assert.Equal(t, map[string]any{ "foo": "bar", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l, nilL}, out.Locations()) + + // Location of the non-nil value should be returned by .Location(). + assert.Equal(t, l, out.Location()) } // Merge v into nil. { - out, err := Merge(dyn.NilValue, v) + out, err := Merge(nilV, v) assert.NoError(t, err) assert.Equal(t, map[string]any{ "foo": "bar", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l, nilL}, out.Locations()) + + // Location of the non-nil value should be returned by .Location(). + assert.Equal(t, l, out.Location()) } } @@ -81,15 +134,18 @@ func TestMergeMapsError(t *testing.T) { } func TestMergeSequences(t *testing.T) { - v1 := dyn.V([]dyn.Value{ - dyn.V("bar"), - dyn.V("baz"), - }) + l1 := dyn.Location{File: "file1", Line: 1, Column: 2} + v1 := dyn.NewValue([]dyn.Value{ + dyn.NewValue("bar", []dyn.Location{l1}), + dyn.NewValue("baz", []dyn.Location{l1}), + }, []dyn.Location{l1}) - v2 := dyn.V([]dyn.Value{ - dyn.V("qux"), - dyn.V("foo"), - }) + l2 := dyn.Location{File: "file2", Line: 3, Column: 4} + l3 := dyn.Location{File: "file3", Line: 5, Column: 6} + v2 := dyn.NewValue([]dyn.Value{ + dyn.NewValue("qux", []dyn.Location{l2}), + dyn.NewValue("foo", []dyn.Location{l3}), + }, []dyn.Location{l2, l3}) // Merge v2 into v1. { @@ -101,6 +157,18 @@ func TestMergeSequences(t *testing.T) { "qux", "foo", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l1, l2, l3}, out.Locations()) + + // Location of the merged value should be the location of v1. + assert.Equal(t, l1, out.Location()) + + // Location of the individual values should be preserved. + assert.Equal(t, l1, out.Index(0).Location()) // "bar" + assert.Equal(t, l1, out.Index(1).Location()) // "baz" + assert.Equal(t, l2, out.Index(2).Location()) // "qux" + assert.Equal(t, l3, out.Index(3).Location()) // "foo" } // Merge v1 into v2. @@ -113,6 +181,18 @@ func TestMergeSequences(t *testing.T) { "bar", "baz", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l2, l3, l1}, out.Locations()) + + // Location of the merged value should be the location of v2. + assert.Equal(t, l2, out.Location()) + + // Location of the individual values should be preserved. + assert.Equal(t, l2, out.Index(0).Location()) // "qux" + assert.Equal(t, l3, out.Index(1).Location()) // "foo" + assert.Equal(t, l1, out.Index(2).Location()) // "bar" + assert.Equal(t, l1, out.Index(3).Location()) // "baz" } } @@ -156,14 +236,22 @@ func TestMergeSequencesError(t *testing.T) { } func TestMergePrimitives(t *testing.T) { - v1 := dyn.V("bar") - v2 := dyn.V("baz") + l1 := dyn.Location{File: "file1", Line: 1, Column: 2} + l2 := dyn.Location{File: "file2", Line: 3, Column: 4} + v1 := dyn.NewValue("bar", []dyn.Location{l1}) + v2 := dyn.NewValue("baz", []dyn.Location{l2}) // Merge v2 into v1. 
{ out, err := Merge(v1, v2) assert.NoError(t, err) assert.Equal(t, "baz", out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l2, l1}, out.Locations()) + + // Location of the merged value should be the location of v2, the second value. + assert.Equal(t, l2, out.Location()) } // Merge v1 into v2. @@ -171,6 +259,12 @@ func TestMergePrimitives(t *testing.T) { out, err := Merge(v2, v1) assert.NoError(t, err) assert.Equal(t, "bar", out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l1, l2}, out.Locations()) + + // Location of the merged value should be the location of v1, the second value. + assert.Equal(t, l1, out.Location()) } } diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go index 823fb1933..7a8667cd6 100644 --- a/libs/dyn/merge/override.go +++ b/libs/dyn/merge/override.go @@ -51,7 +51,7 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri return dyn.InvalidValue, err } - return dyn.NewValue(merged, left.Location()), nil + return dyn.NewValue(merged, left.Locations()), nil case dyn.KindSequence: // some sequences are keyed, and we can detect which elements are added/removed/updated, @@ -62,7 +62,7 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri return dyn.InvalidValue, err } - return dyn.NewValue(merged, left.Location()), nil + return dyn.NewValue(merged, left.Locations()), nil case dyn.KindString: if left.MustString() == right.MustString() { diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index d9ca97486..9d41a526e 100644 --- a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -27,79 +27,79 @@ func TestOverride_Primitive(t *testing.T) { { name: "string (updated)", state: visitorState{updated: []string{"root"}}, - left: dyn.NewValue("a", leftLocation), - right: dyn.NewValue("b", rightLocation), - expected: dyn.NewValue("b", rightLocation), + left: dyn.NewValue("a", []dyn.Location{leftLocation}), + right: dyn.NewValue("b", []dyn.Location{rightLocation}), + expected: dyn.NewValue("b", []dyn.Location{rightLocation}), }, { name: "string (not updated)", state: visitorState{}, - left: dyn.NewValue("a", leftLocation), - right: dyn.NewValue("a", rightLocation), - expected: dyn.NewValue("a", leftLocation), + left: dyn.NewValue("a", []dyn.Location{leftLocation}), + right: dyn.NewValue("a", []dyn.Location{rightLocation}), + expected: dyn.NewValue("a", []dyn.Location{leftLocation}), }, { name: "bool (updated)", state: visitorState{updated: []string{"root"}}, - left: dyn.NewValue(true, leftLocation), - right: dyn.NewValue(false, rightLocation), - expected: dyn.NewValue(false, rightLocation), + left: dyn.NewValue(true, []dyn.Location{leftLocation}), + right: dyn.NewValue(false, []dyn.Location{rightLocation}), + expected: dyn.NewValue(false, []dyn.Location{rightLocation}), }, { name: "bool (not updated)", state: visitorState{}, - left: dyn.NewValue(true, leftLocation), - right: dyn.NewValue(true, rightLocation), - expected: dyn.NewValue(true, leftLocation), + left: dyn.NewValue(true, []dyn.Location{leftLocation}), + right: dyn.NewValue(true, []dyn.Location{rightLocation}), + expected: dyn.NewValue(true, []dyn.Location{leftLocation}), }, { name: "int (updated)", state: visitorState{updated: []string{"root"}}, - left: dyn.NewValue(1, leftLocation), - right: dyn.NewValue(2, rightLocation), - expected: dyn.NewValue(2, rightLocation), + left: dyn.NewValue(1, 
[]dyn.Location{leftLocation}), + right: dyn.NewValue(2, []dyn.Location{rightLocation}), + expected: dyn.NewValue(2, []dyn.Location{rightLocation}), }, { name: "int (not updated)", state: visitorState{}, - left: dyn.NewValue(int32(1), leftLocation), - right: dyn.NewValue(int64(1), rightLocation), - expected: dyn.NewValue(int32(1), leftLocation), + left: dyn.NewValue(int32(1), []dyn.Location{leftLocation}), + right: dyn.NewValue(int64(1), []dyn.Location{rightLocation}), + expected: dyn.NewValue(int32(1), []dyn.Location{leftLocation}), }, { name: "float (updated)", state: visitorState{updated: []string{"root"}}, - left: dyn.NewValue(1.0, leftLocation), - right: dyn.NewValue(2.0, rightLocation), - expected: dyn.NewValue(2.0, rightLocation), + left: dyn.NewValue(1.0, []dyn.Location{leftLocation}), + right: dyn.NewValue(2.0, []dyn.Location{rightLocation}), + expected: dyn.NewValue(2.0, []dyn.Location{rightLocation}), }, { name: "float (not updated)", state: visitorState{}, - left: dyn.NewValue(float32(1.0), leftLocation), - right: dyn.NewValue(float64(1.0), rightLocation), - expected: dyn.NewValue(float32(1.0), leftLocation), + left: dyn.NewValue(float32(1.0), []dyn.Location{leftLocation}), + right: dyn.NewValue(float64(1.0), []dyn.Location{rightLocation}), + expected: dyn.NewValue(float32(1.0), []dyn.Location{leftLocation}), }, { name: "time (updated)", state: visitorState{updated: []string{"root"}}, - left: dyn.NewValue(time.UnixMilli(10000), leftLocation), - right: dyn.NewValue(time.UnixMilli(10001), rightLocation), - expected: dyn.NewValue(time.UnixMilli(10001), rightLocation), + left: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}), + right: dyn.NewValue(time.UnixMilli(10001), []dyn.Location{rightLocation}), + expected: dyn.NewValue(time.UnixMilli(10001), []dyn.Location{rightLocation}), }, { name: "time (not updated)", state: visitorState{}, - left: dyn.NewValue(time.UnixMilli(10000), leftLocation), - right: dyn.NewValue(time.UnixMilli(10000), rightLocation), - expected: dyn.NewValue(time.UnixMilli(10000), leftLocation), + left: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}), + right: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{rightLocation}), + expected: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}), }, { name: "different types (updated)", state: visitorState{updated: []string{"root"}}, - left: dyn.NewValue("a", leftLocation), - right: dyn.NewValue(42, rightLocation), - expected: dyn.NewValue(42, rightLocation), + left: dyn.NewValue("a", []dyn.Location{leftLocation}), + right: dyn.NewValue(42, []dyn.Location{rightLocation}), + expected: dyn.NewValue(42, []dyn.Location{rightLocation}), }, { name: "map - remove 'a', update 'b'", @@ -109,23 +109,22 @@ func TestOverride_Primitive(t *testing.T) { }, left: dyn.NewValue( map[string]dyn.Value{ - "a": dyn.NewValue(42, leftLocation), - "b": dyn.NewValue(10, leftLocation), + "a": dyn.NewValue(42, []dyn.Location{leftLocation}), + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), }, - leftLocation, - ), + []dyn.Location{leftLocation}), + right: dyn.NewValue( map[string]dyn.Value{ - "b": dyn.NewValue(20, rightLocation), + "b": dyn.NewValue(20, []dyn.Location{rightLocation}), }, - rightLocation, - ), + []dyn.Location{rightLocation}), + expected: dyn.NewValue( map[string]dyn.Value{ - "b": dyn.NewValue(20, rightLocation), + "b": dyn.NewValue(20, []dyn.Location{rightLocation}), }, - leftLocation, - ), + []dyn.Location{leftLocation}), }, { name: "map - add 'a'", @@ -134,24 +133,26 @@ func 
TestOverride_Primitive(t *testing.T) { }, left: dyn.NewValue( map[string]dyn.Value{ - "b": dyn.NewValue(10, leftLocation), + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), + right: dyn.NewValue( map[string]dyn.Value{ - "a": dyn.NewValue(42, rightLocation), - "b": dyn.NewValue(10, rightLocation), + "a": dyn.NewValue(42, []dyn.Location{rightLocation}), + "b": dyn.NewValue(10, []dyn.Location{rightLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), + expected: dyn.NewValue( map[string]dyn.Value{ - "a": dyn.NewValue(42, rightLocation), + "a": dyn.NewValue(42, []dyn.Location{rightLocation}), // location hasn't changed because value hasn't changed - "b": dyn.NewValue(10, leftLocation), + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, { @@ -161,23 +162,25 @@ func TestOverride_Primitive(t *testing.T) { }, left: dyn.NewValue( map[string]dyn.Value{ - "a": dyn.NewValue(42, leftLocation), - "b": dyn.NewValue(10, leftLocation), + "a": dyn.NewValue(42, []dyn.Location{leftLocation}), + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), + right: dyn.NewValue( map[string]dyn.Value{ - "b": dyn.NewValue(10, rightLocation), + "b": dyn.NewValue(10, []dyn.Location{rightLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), + expected: dyn.NewValue( map[string]dyn.Value{ // location hasn't changed because value hasn't changed - "b": dyn.NewValue(10, leftLocation), + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, { @@ -189,36 +192,38 @@ func TestOverride_Primitive(t *testing.T) { map[string]dyn.Value{ "jobs": dyn.NewValue( map[string]dyn.Value{ - "job_0": dyn.NewValue(42, leftLocation), + "job_0": dyn.NewValue(42, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, - leftLocation, + []dyn.Location{leftLocation}, ), + right: dyn.NewValue( map[string]dyn.Value{ "jobs": dyn.NewValue( map[string]dyn.Value{ - "job_0": dyn.NewValue(42, rightLocation), - "job_1": dyn.NewValue(1337, rightLocation), + "job_0": dyn.NewValue(42, []dyn.Location{rightLocation}), + "job_1": dyn.NewValue(1337, []dyn.Location{rightLocation}), }, - rightLocation, + []dyn.Location{rightLocation}, ), }, - rightLocation, + []dyn.Location{rightLocation}, ), + expected: dyn.NewValue( map[string]dyn.Value{ "jobs": dyn.NewValue( map[string]dyn.Value{ - "job_0": dyn.NewValue(42, leftLocation), - "job_1": dyn.NewValue(1337, rightLocation), + "job_0": dyn.NewValue(42, []dyn.Location{leftLocation}), + "job_1": dyn.NewValue(1337, []dyn.Location{rightLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, { @@ -228,35 +233,35 @@ func TestOverride_Primitive(t *testing.T) { map[string]dyn.Value{ "jobs": dyn.NewValue( map[string]dyn.Value{ - "job_0": dyn.NewValue(42, leftLocation), - "job_1": dyn.NewValue(1337, rightLocation), + "job_0": dyn.NewValue(42, []dyn.Location{leftLocation}), + "job_1": dyn.NewValue(1337, []dyn.Location{rightLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, - leftLocation, + []dyn.Location{leftLocation}, ), right: dyn.NewValue( map[string]dyn.Value{ "jobs": dyn.NewValue( map[string]dyn.Value{ - "job_0": dyn.NewValue(42, rightLocation), + "job_0": dyn.NewValue(42, []dyn.Location{rightLocation}), }, - rightLocation, + []dyn.Location{rightLocation}, ), }, - 
rightLocation, + []dyn.Location{rightLocation}, ), expected: dyn.NewValue( map[string]dyn.Value{ "jobs": dyn.NewValue( map[string]dyn.Value{ - "job_0": dyn.NewValue(42, leftLocation), + "job_0": dyn.NewValue(42, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, { @@ -264,23 +269,23 @@ func TestOverride_Primitive(t *testing.T) { state: visitorState{added: []string{"root[1]"}}, left: dyn.NewValue( []dyn.Value{ - dyn.NewValue(42, leftLocation), + dyn.NewValue(42, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), right: dyn.NewValue( []dyn.Value{ - dyn.NewValue(42, rightLocation), - dyn.NewValue(10, rightLocation), + dyn.NewValue(42, []dyn.Location{rightLocation}), + dyn.NewValue(10, []dyn.Location{rightLocation}), }, - rightLocation, + []dyn.Location{rightLocation}, ), expected: dyn.NewValue( []dyn.Value{ - dyn.NewValue(42, leftLocation), - dyn.NewValue(10, rightLocation), + dyn.NewValue(42, []dyn.Location{leftLocation}), + dyn.NewValue(10, []dyn.Location{rightLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, { @@ -288,67 +293,67 @@ func TestOverride_Primitive(t *testing.T) { state: visitorState{removed: []string{"root[1]"}}, left: dyn.NewValue( []dyn.Value{ - dyn.NewValue(42, leftLocation), - dyn.NewValue(10, leftLocation), + dyn.NewValue(42, []dyn.Location{leftLocation}), + dyn.NewValue(10, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), right: dyn.NewValue( []dyn.Value{ - dyn.NewValue(42, rightLocation), + dyn.NewValue(42, []dyn.Location{rightLocation}), }, - rightLocation, + []dyn.Location{rightLocation}, ), expected: dyn.NewValue( []dyn.Value{ - // location hasn't changed because value hasn't changed - dyn.NewValue(42, leftLocation), + dyn.NewValue(42, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), + // location hasn't changed because value hasn't changed }, { name: "sequence (not updated)", state: visitorState{}, left: dyn.NewValue( []dyn.Value{ - dyn.NewValue(42, leftLocation), + dyn.NewValue(42, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), right: dyn.NewValue( []dyn.Value{ - dyn.NewValue(42, rightLocation), + dyn.NewValue(42, []dyn.Location{rightLocation}), }, - rightLocation, + []dyn.Location{rightLocation}, ), expected: dyn.NewValue( []dyn.Value{ - dyn.NewValue(42, leftLocation), + dyn.NewValue(42, []dyn.Location{leftLocation}), }, - leftLocation, + []dyn.Location{leftLocation}, ), }, { name: "nil (not updated)", state: visitorState{}, - left: dyn.NilValue.WithLocation(leftLocation), - right: dyn.NilValue.WithLocation(rightLocation), - expected: dyn.NilValue.WithLocation(leftLocation), + left: dyn.NilValue.WithLocations([]dyn.Location{leftLocation}), + right: dyn.NilValue.WithLocations([]dyn.Location{rightLocation}), + expected: dyn.NilValue.WithLocations([]dyn.Location{leftLocation}), }, { name: "nil (updated)", state: visitorState{updated: []string{"root"}}, left: dyn.NilValue, - right: dyn.NewValue(42, rightLocation), - expected: dyn.NewValue(42, rightLocation), + right: dyn.NewValue(42, []dyn.Location{rightLocation}), + expected: dyn.NewValue(42, []dyn.Location{rightLocation}), }, { name: "change kind (updated)", state: visitorState{updated: []string{"root"}}, - left: dyn.NewValue(42.0, leftLocation), - right: dyn.NewValue(42, rightLocation), - expected: dyn.NewValue(42, rightLocation), + left: dyn.NewValue(42.0, 
[]dyn.Location{leftLocation}), + right: dyn.NewValue(42, []dyn.Location{rightLocation}), + expected: dyn.NewValue(42, []dyn.Location{rightLocation}), }, } @@ -375,7 +380,7 @@ func TestOverride_Primitive(t *testing.T) { }) t.Run(tc.name+" - visitor overrides value", func(t *testing.T) { - expected := dyn.NewValue("return value", dyn.Location{}) + expected := dyn.V("return value") s, visitor := createVisitor(visitorOpts{returnValue: &expected}) out, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) @@ -427,17 +432,17 @@ func TestOverride_PreserveMappingKeys(t *testing.T) { rightValueLocation := dyn.Location{File: "right.yml", Line: 3, Column: 1} left := dyn.NewMapping() - left.Set(dyn.NewValue("a", leftKeyLocation), dyn.NewValue(42, leftValueLocation)) + left.Set(dyn.NewValue("a", []dyn.Location{leftKeyLocation}), dyn.NewValue(42, []dyn.Location{leftValueLocation})) right := dyn.NewMapping() - right.Set(dyn.NewValue("a", rightKeyLocation), dyn.NewValue(7, rightValueLocation)) + right.Set(dyn.NewValue("a", []dyn.Location{rightKeyLocation}), dyn.NewValue(7, []dyn.Location{rightValueLocation})) state, visitor := createVisitor(visitorOpts{}) out, err := override( dyn.EmptyPath, - dyn.NewValue(left, leftLocation), - dyn.NewValue(right, rightLocation), + dyn.NewValue(left, []dyn.Location{leftLocation}), + dyn.NewValue(right, []dyn.Location{rightLocation}), visitor, ) diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go index a265dad08..aecdc3ca6 100644 --- a/libs/dyn/pattern.go +++ b/libs/dyn/pattern.go @@ -72,7 +72,7 @@ func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitO m.Set(pk, nv) } - return NewValue(m, v.Location()), nil + return NewValue(m, v.Locations()), nil } type anyIndexComponent struct{} @@ -103,5 +103,5 @@ func (c anyIndexComponent) visit(v Value, prefix Path, suffix Pattern, opts visi s[i] = nv } - return NewValue(s, v.Location()), nil + return NewValue(s, v.Locations()), nil } diff --git a/libs/dyn/value.go b/libs/dyn/value.go index 3d62ea1f5..2aed2f6cd 100644 --- a/libs/dyn/value.go +++ b/libs/dyn/value.go @@ -2,13 +2,18 @@ package dyn import ( "fmt" + "slices" ) type Value struct { v any k Kind - l Location + + // List of locations this value is defined at. The first location in the slice + // is the location returned by the `.Location()` method and is typically used + // for reporting errors and warnings associated with the value. + l []Location // Whether or not this value is an anchor. // If this node doesn't map to a type, we don't need to warn about it. @@ -27,11 +32,11 @@ var NilValue = Value{ // V constructs a new Value with the given value. func V(v any) Value { - return NewValue(v, Location{}) + return NewValue(v, []Location{}) } // NewValue constructs a new Value with the given value and location. -func NewValue(v any, loc Location) Value { +func NewValue(v any, loc []Location) Value { switch vin := v.(type) { case map[string]Value: v = newMappingFromGoMap(vin) @@ -40,16 +45,30 @@ func NewValue(v any, loc Location) Value { return Value{ v: v, k: kindOf(v), - l: loc, + + // create a copy of the locations, so that mutations to the original slice + // don't affect new value. + l: slices.Clone(loc), } } -// WithLocation returns a new Value with its location set to the given value. -func (v Value) WithLocation(loc Location) Value { +// WithLocations returns a new Value with its location set to the given value. 
+func (v Value) WithLocations(loc []Location) Value { return Value{ v: v.v, k: v.k, - l: loc, + + // create a copy of the locations, so that mutations to the original slice + // don't affect new value. + l: slices.Clone(loc), + } +} + +func (v Value) AppendLocationsFromValue(w Value) Value { + return Value{ + v: v.v, + k: v.k, + l: append(v.l, w.l...), } } @@ -61,10 +80,18 @@ func (v Value) Value() any { return v.v } -func (v Value) Location() Location { +func (v Value) Locations() []Location { return v.l } +func (v Value) Location() Location { + if len(v.l) == 0 { + return Location{} + } + + return v.l[0] +} + func (v Value) IsValid() bool { return v.k != KindInvalid } @@ -153,7 +180,10 @@ func (v Value) IsAnchor() bool { // We need a custom implementation because maps and slices // cannot be compared with the regular == operator. func (v Value) eq(w Value) bool { - if v.k != w.k || v.l != w.l { + if v.k != w.k { + return false + } + if !slices.Equal(v.l, w.l) { return false } diff --git a/libs/dyn/value_test.go b/libs/dyn/value_test.go index bbdc2c96b..6a0a27b8d 100644 --- a/libs/dyn/value_test.go +++ b/libs/dyn/value_test.go @@ -25,16 +25,19 @@ func TestValueAsMap(t *testing.T) { _, ok := zeroValue.AsMap() assert.False(t, ok) - var intValue = dyn.NewValue(1, dyn.Location{}) + var intValue = dyn.V(1) _, ok = intValue.AsMap() assert.False(t, ok) var mapValue = dyn.NewValue( map[string]dyn.Value{ - "key": dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), + "key": dyn.NewValue( + "value", + []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) + m, ok := mapValue.AsMap() assert.True(t, ok) assert.Equal(t, 1, m.Len()) @@ -43,6 +46,6 @@ func TestValueAsMap(t *testing.T) { func TestValueIsValid(t *testing.T) { var zeroValue dyn.Value assert.False(t, zeroValue.IsValid()) - var intValue = dyn.NewValue(1, dyn.Location{}) + var intValue = dyn.V(1) assert.True(t, intValue.IsValid()) } diff --git a/libs/dyn/value_underlying_test.go b/libs/dyn/value_underlying_test.go index 83cffb772..e35cde582 100644 --- a/libs/dyn/value_underlying_test.go +++ b/libs/dyn/value_underlying_test.go @@ -11,7 +11,7 @@ import ( func TestValueUnderlyingMap(t *testing.T) { v := dyn.V( map[string]dyn.Value{ - "key": dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), + "key": dyn.NewValue("value", []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, ) @@ -33,7 +33,7 @@ func TestValueUnderlyingMap(t *testing.T) { func TestValueUnderlyingSequence(t *testing.T) { v := dyn.V( []dyn.Value{ - dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), + dyn.NewValue("value", []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, ) diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index 56a9cf9f3..cd2cd4831 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -27,7 +27,7 @@ func Foreach(fn MapFunc) MapFunc { } m.Set(pk, nv) } - return NewValue(m, v.Location()), nil + return NewValue(m, v.Locations()), nil case KindSequence: s := slices.Clone(v.MustSequence()) for i, value := range s { @@ -37,7 +37,7 @@ func Foreach(fn MapFunc) MapFunc { return InvalidValue, err } } - return NewValue(s, v.Location()), nil + return NewValue(s, v.Locations()), nil default: return InvalidValue, fmt.Errorf("expected a map or sequence, found %s", v.Kind()) } diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index e6a16f79e..fbb52b504 100644 --- 
a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -86,7 +86,7 @@ func (d *loader) loadSequence(node *yaml.Node, loc dyn.Location) (dyn.Value, err acc[i] = v } - return dyn.NewValue(acc, loc), nil + return dyn.NewValue(acc, []dyn.Location{loc}), nil } func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { @@ -130,7 +130,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro } if merge == nil { - return dyn.NewValue(acc, loc), nil + return dyn.NewValue(acc, []dyn.Location{loc}), nil } // Build location for the merge node. @@ -171,20 +171,20 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro out.Merge(m) } - return dyn.NewValue(out, loc), nil + return dyn.NewValue(out, []dyn.Location{loc}), nil } func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { st := node.ShortTag() switch st { case "!!str": - return dyn.NewValue(node.Value, loc), nil + return dyn.NewValue(node.Value, []dyn.Location{loc}), nil case "!!bool": switch strings.ToLower(node.Value) { case "true": - return dyn.NewValue(true, loc), nil + return dyn.NewValue(true, []dyn.Location{loc}), nil case "false": - return dyn.NewValue(false, loc), nil + return dyn.NewValue(false, []dyn.Location{loc}), nil default: return dyn.InvalidValue, errorf(loc, "invalid bool value: %v", node.Value) } @@ -195,17 +195,17 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error } // Use regular int type instead of int64 if possible. if i64 >= math.MinInt32 && i64 <= math.MaxInt32 { - return dyn.NewValue(int(i64), loc), nil + return dyn.NewValue(int(i64), []dyn.Location{loc}), nil } - return dyn.NewValue(i64, loc), nil + return dyn.NewValue(i64, []dyn.Location{loc}), nil case "!!float": f64, err := strconv.ParseFloat(node.Value, 64) if err != nil { return dyn.InvalidValue, errorf(loc, "invalid float value: %v", node.Value) } - return dyn.NewValue(f64, loc), nil + return dyn.NewValue(f64, []dyn.Location{loc}), nil case "!!null": - return dyn.NewValue(nil, loc), nil + return dyn.NewValue(nil, []dyn.Location{loc}), nil case "!!timestamp": // Try a couple of layouts for _, layout := range []string{ @@ -216,7 +216,7 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error } { t, terr := time.Parse(layout, node.Value) if terr == nil { - return dyn.NewValue(t, loc), nil + return dyn.NewValue(t, []dyn.Location{loc}), nil } } return dyn.InvalidValue, errorf(loc, "invalid timestamp value: %v", node.Value) diff --git a/libs/dyn/yamlsaver/saver_test.go b/libs/dyn/yamlsaver/saver_test.go index bdf1891cd..387090104 100644 --- a/libs/dyn/yamlsaver/saver_test.go +++ b/libs/dyn/yamlsaver/saver_test.go @@ -19,7 +19,7 @@ func TestMarshalNilValue(t *testing.T) { func TestMarshalIntValue(t *testing.T) { s := NewSaver() - var intValue = dyn.NewValue(1, dyn.Location{}) + var intValue = dyn.V(1) v, err := s.toYamlNode(intValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) @@ -28,7 +28,7 @@ func TestMarshalIntValue(t *testing.T) { func TestMarshalFloatValue(t *testing.T) { s := NewSaver() - var floatValue = dyn.NewValue(1.0, dyn.Location{}) + var floatValue = dyn.V(1.0) v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) @@ -37,7 +37,7 @@ func TestMarshalFloatValue(t *testing.T) { func TestMarshalBoolValue(t *testing.T) { s := NewSaver() - var boolValue = dyn.NewValue(true, dyn.Location{}) + var boolValue = dyn.V(true) v, err := 
s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) @@ -46,7 +46,7 @@ func TestMarshalBoolValue(t *testing.T) { func TestMarshalTimeValue(t *testing.T) { s := NewSaver() - var timeValue = dyn.NewValue(time.Unix(0, 0), dyn.Location{}) + var timeValue = dyn.V(time.Unix(0, 0)) v, err := s.toYamlNode(timeValue) assert.NoError(t, err) assert.Equal(t, "1970-01-01 00:00:00 +0000 UTC", v.Value) @@ -57,10 +57,10 @@ func TestMarshalSequenceValue(t *testing.T) { s := NewSaver() var sequenceValue = dyn.NewValue( []dyn.Value{ - dyn.NewValue("value1", dyn.Location{File: "file", Line: 1, Column: 2}), - dyn.NewValue("value2", dyn.Location{File: "file", Line: 2, Column: 2}), + dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), + dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) v, err := s.toYamlNode(sequenceValue) assert.NoError(t, err) @@ -71,7 +71,7 @@ func TestMarshalSequenceValue(t *testing.T) { func TestMarshalStringValue(t *testing.T) { s := NewSaver() - var stringValue = dyn.NewValue("value", dyn.Location{}) + var stringValue = dyn.V("value") v, err := s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "value", v.Value) @@ -82,12 +82,13 @@ func TestMarshalMapValue(t *testing.T) { s := NewSaver() var mapValue = dyn.NewValue( map[string]dyn.Value{ - "key3": dyn.NewValue("value3", dyn.Location{File: "file", Line: 3, Column: 2}), - "key2": dyn.NewValue("value2", dyn.Location{File: "file", Line: 2, Column: 2}), - "key1": dyn.NewValue("value1", dyn.Location{File: "file", Line: 1, Column: 2}), + "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 3, Column: 2}}), + "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), + "key1": dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) + v, err := s.toYamlNode(mapValue) assert.NoError(t, err) assert.Equal(t, yaml.MappingNode, v.Kind) @@ -107,12 +108,12 @@ func TestMarshalNestedValues(t *testing.T) { map[string]dyn.Value{ "key1": dyn.NewValue( map[string]dyn.Value{ - "key2": dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), + "key2": dyn.NewValue("value", []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) v, err := s.toYamlNode(mapValue) assert.NoError(t, err) @@ -125,14 +126,14 @@ func TestMarshalNestedValues(t *testing.T) { func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { s := NewSaver() - var hexValue = dyn.NewValue(0x123, dyn.Location{}) + var hexValue = dyn.V(0x123) v, err := s.toYamlNode(hexValue) assert.NoError(t, err) assert.Equal(t, "291", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("0x123", dyn.Location{}) + var stringValue = dyn.V("0x123") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0x123", v.Value) @@ -142,14 +143,14 @@ func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { func TestMarshalBinaryValueIsQuoted(t *testing.T) { s := NewSaver() - var binaryValue = dyn.NewValue(0b101, dyn.Location{}) + var binaryValue = 
dyn.V(0b101) v, err := s.toYamlNode(binaryValue) assert.NoError(t, err) assert.Equal(t, "5", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("0b101", dyn.Location{}) + var stringValue = dyn.V("0b101") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0b101", v.Value) @@ -159,14 +160,14 @@ func TestMarshalBinaryValueIsQuoted(t *testing.T) { func TestMarshalOctalValueIsQuoted(t *testing.T) { s := NewSaver() - var octalValue = dyn.NewValue(0123, dyn.Location{}) + var octalValue = dyn.V(0123) v, err := s.toYamlNode(octalValue) assert.NoError(t, err) assert.Equal(t, "83", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("0123", dyn.Location{}) + var stringValue = dyn.V("0123") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0123", v.Value) @@ -176,14 +177,14 @@ func TestMarshalOctalValueIsQuoted(t *testing.T) { func TestMarshalFloatValueIsQuoted(t *testing.T) { s := NewSaver() - var floatValue = dyn.NewValue(1.0, dyn.Location{}) + var floatValue = dyn.V(1.0) v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("1.0", dyn.Location{}) + var stringValue = dyn.V("1.0") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "1.0", v.Value) @@ -193,14 +194,14 @@ func TestMarshalFloatValueIsQuoted(t *testing.T) { func TestMarshalBoolValueIsQuoted(t *testing.T) { s := NewSaver() - var boolValue = dyn.NewValue(true, dyn.Location{}) + var boolValue = dyn.V(true) v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("true", dyn.Location{}) + var stringValue = dyn.V("true") v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) @@ -215,18 +216,18 @@ func TestCustomStylingWithNestedMap(t *testing.T) { var styledMap = dyn.NewValue( map[string]dyn.Value{ - "key1": dyn.NewValue("value1", dyn.Location{File: "file", Line: 1, Column: 2}), - "key2": dyn.NewValue("value2", dyn.Location{File: "file", Line: 2, Column: 2}), + "key1": dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), + "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), }, - dyn.Location{File: "file", Line: -2, Column: 2}, + []dyn.Location{{File: "file", Line: -2, Column: 2}}, ) var unstyledMap = dyn.NewValue( map[string]dyn.Value{ - "key3": dyn.NewValue("value3", dyn.Location{File: "file", Line: 1, Column: 2}), - "key4": dyn.NewValue("value4", dyn.Location{File: "file", Line: 2, Column: 2}), + "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 1, Column: 2}}), + "key4": dyn.NewValue("value4", []dyn.Location{{File: "file", Line: 2, Column: 2}}), }, - dyn.Location{File: "file", Line: -1, Column: 2}, + []dyn.Location{{File: "file", Line: -1, Column: 2}}, ) var val = dyn.NewValue( @@ -234,7 +235,7 @@ func TestCustomStylingWithNestedMap(t *testing.T) { "styled": styledMap, "unstyled": unstyledMap, }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) mv, err := s.toYamlNode(val) diff --git a/libs/dyn/yamlsaver/utils.go b/libs/dyn/yamlsaver/utils.go index fa5ab08fb..a162bf31f 100644 --- 
a/libs/dyn/yamlsaver/utils.go +++ b/libs/dyn/yamlsaver/utils.go @@ -44,7 +44,7 @@ func skipAndOrder(mv dyn.Value, order *Order, skipFields []string, dst map[strin continue } - dst[k] = dyn.NewValue(v.Value(), dyn.Location{Line: order.Get(k)}) + dst[k] = dyn.NewValue(v.Value(), []dyn.Location{{Line: order.Get(k)}}) } return dyn.V(dst), nil diff --git a/libs/dyn/yamlsaver/utils_test.go b/libs/dyn/yamlsaver/utils_test.go index 04b4c404f..1afab601a 100644 --- a/libs/dyn/yamlsaver/utils_test.go +++ b/libs/dyn/yamlsaver/utils_test.go @@ -33,16 +33,25 @@ func TestConvertToMapValueWithOrder(t *testing.T) { assert.NoError(t, err) assert.Equal(t, dyn.V(map[string]dyn.Value{ - "list": dyn.NewValue([]dyn.Value{ - dyn.V("a"), - dyn.V("b"), - dyn.V("c"), - }, dyn.Location{Line: -3}), - "name": dyn.NewValue("test", dyn.Location{Line: -2}), - "map": dyn.NewValue(map[string]dyn.Value{ - "key1": dyn.V("value1"), - "key2": dyn.V("value2"), - }, dyn.Location{Line: -1}), - "long_name_field": dyn.NewValue("long name goes here", dyn.Location{Line: 1}), + "list": dyn.NewValue( + []dyn.Value{ + dyn.V("a"), + dyn.V("b"), + dyn.V("c"), + }, + []dyn.Location{{Line: -3}}, + ), + "name": dyn.NewValue( + "test", + []dyn.Location{{Line: -2}}, + ), + "map": dyn.NewValue( + map[string]dyn.Value{ + "key1": dyn.V("value1"), + "key2": dyn.V("value2"), + }, + []dyn.Location{{Line: -1}}, + ), + "long_name_field": dyn.NewValue("long name goes here", []dyn.Location{{Line: 1}}), }), result) } From 10fe02075fec0b2e18d2eacf7412816d6e81d6bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 13:40:12 +0200 Subject: [PATCH 286/286] Bump github.com/databricks/databricks-sdk-go from 0.43.0 to 0.43.2 (#1594) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.43.0 to 0.43.2.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.43.2


Internal Changes

  • Enforce Tag on PRs (#969).
  • Generate SDK for apierr changes (#970).
  • Add Release tag and Workflow Fix (#972).

v0.43.1


Major Changes and Improvements:

  • Add a credentials provider for Github Azure OIDC (#965).
  • Add DataPlane API Support (#936).
  • Added more error messages for retriable errors (timeouts, etc.) (#963).

Internal Changes

  • Add ChangelogConfig to Generator struct (#967).
  • Improve Changelog by grouping changes (#962).
  • Parse API Error messages with int error codes (#960).

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index ce7ad0c1e..5e29d295e 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.22
 require (
 	github.com/Masterminds/semver/v3 v3.2.1 // MIT
 	github.com/briandowns/spinner v1.23.1 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.43.0 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.43.2 // Apache 2.0
 	github.com/fatih/color v1.17.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
diff --git a/go.sum b/go.sum
index eb7a87a89..8f774a47a 100644
--- a/go.sum
+++ b/go.sum
@@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.43.0 h1:x4laolWhYlsQg2t8yWEGyRPZy4/Wv3pKnLEoJfVin7I=
-github.com/databricks/databricks-sdk-go v0.43.0/go.mod h1:a9rr0FOHLL26kOjQjZZVFjIYmRABCbrAWVeundDEVG8=
+github.com/databricks/databricks-sdk-go v0.43.2 h1:4B+sHAYO5kFqwZNQRmsF70eecqsFX6i/0KfXoDFQT/E=
+github.com/databricks/databricks-sdk-go v0.43.2/go.mod h1:nlzeOEgJ1Tmb5HyknBJ3GEorCZKWqEBoHprvPmTSNq8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
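
For context on the `[]dyn.Location` API changes appearing in the `libs/dyn` patch earlier in this series, the following is a minimal usage sketch. It uses only the functions shown in that patch (`dyn.V`, `dyn.NewValue`, `WithLocations`/`Locations`/`Location`, `AppendLocationsFromValue`); the file names, line numbers, and values are illustrative, not taken from the patches.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// A value read from a single configuration file carries one location.
	base := dyn.NewValue(42, []dyn.Location{
		{File: "databricks.yml", Line: 3, Column: 5},
	})

	// The same value defined again in an override file (illustrative name).
	override := dyn.NewValue(42, []dyn.Location{
		{File: "override.yml", Line: 7, Column: 5},
	})

	// Merge the definition sites: the result keeps base's content and appends
	// override's locations after base's own.
	merged := base.AppendLocationsFromValue(override)

	// Locations() returns every definition site; Location() returns the first
	// one, which is the location used for errors and warnings.
	fmt.Println(len(merged.Locations())) // 2
	primary := merged.Location()
	fmt.Printf("%s:%d:%d\n", primary.File, primary.Line, primary.Column) // databricks.yml:3:5

	// Values constructed with dyn.V have no locations; Location() then
	// returns the zero Location.
	anon := dyn.V("hello")
	fmt.Println(len(anon.Locations())) // 0
}
```

Keeping the first location as the primary one preserves the existing single-location reporting behavior while still recording every file a value was defined in.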